fix(cli): keep /model and /reasoning conversation-scoped (#1178)

This commit is contained in:
Sarah Wooders
2026-02-26 17:58:42 -08:00
committed by GitHub
parent c5a8871be7
commit 0025c387ec
4 changed files with 378 additions and 110 deletions

View File

@@ -7,6 +7,7 @@ import type {
GoogleAIModelSettings, GoogleAIModelSettings,
OpenAIModelSettings, OpenAIModelSettings,
} from "@letta-ai/letta-client/resources/agents/agents"; } from "@letta-ai/letta-client/resources/agents/agents";
import type { Conversation } from "@letta-ai/letta-client/resources/conversations/conversations";
import { OPENAI_CODEX_PROVIDER_NAME } from "../providers/openai-codex-provider"; import { OPENAI_CODEX_PROVIDER_NAME } from "../providers/openai-codex-provider";
import { getModelContextWindow } from "./available-models"; import { getModelContextWindow } from "./available-models";
import { getClient } from "./client"; import { getClient } from "./client";
@@ -209,6 +210,34 @@ export async function updateAgentLLMConfig(
return finalAgent; return finalAgent;
} }
/**
 * Applies a model (and optional model settings) to a single conversation.
 *
 * The override lives on the conversation, not the agent, so switching models
 * in one conversation leaves the agent's default model untouched.
 *
 * @param conversationId - Target conversation ID (or "default")
 * @param modelHandle - Model handle such as "anthropic/claude-sonnet-4-5-20250929"
 * @param updateArgs - Extra config args (reasoning_effort, enable_reasoner, etc.)
 * @returns The conversation as returned by the server after the update
 */
export async function updateConversationLLMConfig(
  conversationId: string,
  modelHandle: string,
  updateArgs?: Record<string, unknown>,
): Promise<Conversation> {
  const client = await getClient();
  // Provider-specific settings derived from the handle plus extra args.
  const modelSettings = buildModelSettings(modelHandle, updateArgs);
  const body: Record<string, unknown> = { model: modelHandle };
  if (Object.keys(modelSettings).length > 0) {
    body.model_settings = modelSettings;
  }
  // Cast needed: the SDK's update params may lag behind the backend fields.
  const payload = body as unknown as Parameters<typeof client.conversations.update>[1];
  return client.conversations.update(conversationId, payload);
}
export interface SystemPromptUpdateResult { export interface SystemPromptUpdateResult {
success: boolean; success: boolean;
message: string; message: string;

View File

@@ -416,6 +416,32 @@ function inferReasoningEffortFromModelPreset(
return null; return null;
} }
/**
 * Reconstructs a "provider/model" handle from an llm_config, when possible.
 * Falls back to the bare model name, or null when nothing is available.
 */
function buildModelHandleFromLlmConfig(
  llmConfig: LlmConfig | null | undefined,
): string | null {
  if (!llmConfig) {
    return null;
  }
  const { model, model_endpoint_type: endpointType } = llmConfig;
  // Both pieces present -> canonical "provider/model"; otherwise bare model.
  return endpointType && model ? `${endpointType}/${model}` : (model ?? null);
}
/**
 * Converts a "provider/model" handle into a partial llm_config patch.
 * Handles without a clear provider/model split patch only the model field.
 */
function mapHandleToLlmConfigPatch(modelHandle: string): Partial<LlmConfig> {
  const slash = modelHandle.indexOf("/");
  const provider = slash >= 0 ? modelHandle.slice(0, slash) : modelHandle;
  const modelName = slash >= 0 ? modelHandle.slice(slash + 1) : "";
  if (!provider || !modelName) {
    // Not in "provider/model" form — treat the whole handle as the model.
    return {
      model: modelHandle,
    };
  }
  // The Codex provider maps to a distinct endpoint type on the backend.
  const endpointType =
    provider === OPENAI_CODEX_PROVIDER_NAME ? "chatgpt_oauth" : provider;
  return {
    model: modelName,
    model_endpoint_type: endpointType as LlmConfig["model_endpoint_type"],
  };
}
// Helper to get appropriate error hint based on stop reason and current model // Helper to get appropriate error hint based on stop reason and current model
function getErrorHintForStopReason( function getErrorHintForStopReason(
stopReason: StopReasonType | null, stopReason: StopReasonType | null,
@@ -1399,10 +1425,16 @@ export default function App({
const [currentToolsetPreference, setCurrentToolsetPreference] = const [currentToolsetPreference, setCurrentToolsetPreference] =
useState<ToolsetPreference>("auto"); useState<ToolsetPreference>("auto");
const [llmConfig, setLlmConfig] = useState<LlmConfig | null>(null); const [llmConfig, setLlmConfig] = useState<LlmConfig | null>(null);
const [hasConversationModelOverride, setHasConversationModelOverride] =
useState(false);
const llmConfigRef = useRef(llmConfig); const llmConfigRef = useRef(llmConfig);
useEffect(() => { useEffect(() => {
llmConfigRef.current = llmConfig; llmConfigRef.current = llmConfig;
}, [llmConfig]); }, [llmConfig]);
const hasConversationModelOverrideRef = useRef(hasConversationModelOverride);
useEffect(() => {
hasConversationModelOverrideRef.current = hasConversationModelOverride;
}, [hasConversationModelOverride]);
const agentStateRef = useRef(agentState); const agentStateRef = useRef(agentState);
useEffect(() => { useEffect(() => {
agentStateRef.current = agentState; agentStateRef.current = agentState;
@@ -1416,9 +1448,10 @@ export default function App({
const agentName = agentState?.name ?? null; const agentName = agentState?.name ?? null;
const [agentDescription, setAgentDescription] = useState<string | null>(null); const [agentDescription, setAgentDescription] = useState<string | null>(null);
const [agentLastRunAt, setAgentLastRunAt] = useState<string | null>(null); const [agentLastRunAt, setAgentLastRunAt] = useState<string | null>(null);
// Prefer agent.model (canonical handle) over reconstructing from llm_config fields, // Prefer the currently-active model handle, then fall back to agent.model
// which may not faithfully reproduce the original handle (e.g. "openai/gpt-5" vs "openai/gpt-5.3-codex"). // (canonical handle) and finally llm_config reconstruction.
const currentModelLabel = const currentModelLabel =
currentModelHandle ||
agentState?.model || agentState?.model ||
(llmConfig?.model_endpoint_type && llmConfig?.model (llmConfig?.model_endpoint_type && llmConfig?.model
? `${llmConfig.model_endpoint_type}/${llmConfig.model}` ? `${llmConfig.model_endpoint_type}/${llmConfig.model}`
@@ -1447,8 +1480,11 @@ export default function App({
// Derive reasoning effort from model_settings (canonical) with llm_config as legacy fallback. // Derive reasoning effort from model_settings (canonical) with llm_config as legacy fallback.
// Some providers may omit explicit effort for default tiers (e.g., Sonnet 4.6 high), // Some providers may omit explicit effort for default tiers (e.g., Sonnet 4.6 high),
// so fall back to the selected model preset when needed. // so fall back to the selected model preset when needed.
const effectiveModelSettings = hasConversationModelOverride
? undefined
: agentState?.model_settings;
const currentReasoningEffort: ModelReasoningEffort | null = const currentReasoningEffort: ModelReasoningEffort | null =
deriveReasoningEffort(agentState?.model_settings, llmConfig) ?? deriveReasoningEffort(effectiveModelSettings, llmConfig) ??
inferReasoningEffortFromModelPreset(currentModelId, currentModelLabel); inferReasoningEffortFromModelPreset(currentModelId, currentModelLabel);
// Billing tier for conditional UI and error context (fetched once on mount) // Billing tier for conditional UI and error context (fetched once on mount)
@@ -3124,6 +3160,113 @@ export default function App({
} }
}, [loadingState, agentId]); }, [loadingState, agentId]);
// Keep effective model state in sync with the active conversation override.
// Retrieves the conversation from the server; when it carries a model (or
// model_settings) override, the UI reflects that model, otherwise the agent's
// own model is applied locally.
useEffect(() => {
  // Only run once the app is fully loaded with a concrete agent.
  if (
    loadingState !== "ready" ||
    !agentId ||
    agentId === "loading" ||
    !agentState
  ) {
    return;
  }
  // Guards against applying stale results after unmount/dependency change.
  let cancelled = false;
  // Resets UI state to the agent's default model (no conversation override).
  const applyAgentModelLocally = () => {
    const agentModelHandle =
      agentState.model ??
      buildModelHandleFromLlmConfig(agentState.llm_config);
    setHasConversationModelOverride(false);
    setLlmConfig(agentState.llm_config);
    setCurrentModelHandle(agentModelHandle ?? null);
    const modelInfo = getModelInfoForLlmConfig(
      agentModelHandle || "",
      agentState.llm_config as unknown as {
        reasoning_effort?: string | null;
        enable_reasoner?: boolean | null;
      },
    );
    // Unknown models (e.g. BYOK) fall back to the raw handle as the ID.
    setCurrentModelId(modelInfo?.id ?? (agentModelHandle || null));
  };
  const syncConversationModel = async () => {
    try {
      const client = await getClient();
      const conversation =
        await client.conversations.retrieve(conversationId);
      if (cancelled) return;
      // NOTE(review): casts assume the SDK's Conversation type may not yet
      // declare `model` / `model_settings` — confirm against the SDK version.
      const conversationModel = (conversation as { model?: string | null })
        .model;
      const conversationModelSettings = (
        conversation as {
          model_settings?: AgentState["model_settings"] | null;
        }
      ).model_settings;
      // An override exists when either field is present and non-null.
      const hasOverride =
        conversationModel !== undefined && conversationModel !== null
          ? true
          : conversationModelSettings !== undefined &&
            conversationModelSettings !== null;
      if (!hasOverride) {
        applyAgentModelLocally();
        return;
      }
      const agentModelHandle =
        agentState.model ??
        buildModelHandleFromLlmConfig(agentState.llm_config);
      // Conversation model wins; the agent's handle is the fallback.
      const effectiveModelHandle = conversationModel ?? agentModelHandle;
      if (!effectiveModelHandle) {
        applyAgentModelLocally();
        return;
      }
      const reasoningEffort = deriveReasoningEffort(
        conversationModelSettings,
        agentState.llm_config,
      );
      setHasConversationModelOverride(true);
      setCurrentModelHandle(effectiveModelHandle);
      const modelInfo = getModelInfoForLlmConfig(effectiveModelHandle, {
        reasoning_effort: reasoningEffort,
        enable_reasoner:
          (
            agentState.llm_config as {
              enable_reasoner?: boolean | null;
            }
          ).enable_reasoner ?? null,
      });
      setCurrentModelId(modelInfo?.id ?? effectiveModelHandle);
      // Project the override onto local llm_config so downstream derivations
      // (footer, reasoning effort) reflect the conversation-scoped model.
      setLlmConfig({
        ...agentState.llm_config,
        ...mapHandleToLlmConfigPatch(effectiveModelHandle),
        ...(typeof reasoningEffort === "string"
          ? { reasoning_effort: reasoningEffort }
          : {}),
      });
    } catch (error) {
      if (cancelled) return;
      // Best-effort: on any retrieval failure fall back to agent defaults.
      debugLog(
        "conversation-model",
        "Failed to sync conversation model override: %O",
        error,
      );
      applyAgentModelLocally();
    }
  };
  void syncConversationModel();
  return () => {
    cancelled = true;
  };
}, [agentId, agentState, conversationId, loadingState]);
// Helper to append an error to the transcript // Helper to append an error to the transcript
// Also tracks the error in telemetry so we know an error was shown. // Also tracks the error in telemetry so we know an error was shown.
// Pass `true` or `{ skip: true }` to suppress telemetry (e.g. hint // Pass `true` or `{ skip: true }` to suppress telemetry (e.g. hint
@@ -3982,34 +4125,32 @@ export default function App({
currentEffort !== agentEffort || currentEffort !== agentEffort ||
currentEnableReasoner !== agentEnableReasoner currentEnableReasoner !== agentEnableReasoner
) { ) {
// Model has changed - update local state if (!hasConversationModelOverrideRef.current) {
setLlmConfig(agent.llm_config); // Model has changed at the agent level - update local state.
setLlmConfig(agent.llm_config);
// Derive model ID from llm_config for ModelSelector // Derive model ID from llm_config for ModelSelector.
// Try to find matching model by handle in models.json const agentModelHandle = buildModelHandleFromLlmConfig(
const { getModelInfoForLlmConfig } = await import( agent.llm_config,
"../agent/model" );
);
const agentModelHandle =
agent.llm_config.model_endpoint_type && agent.llm_config.model
? `${agent.llm_config.model_endpoint_type}/${agent.llm_config.model}`
: agent.llm_config.model;
const modelInfo = getModelInfoForLlmConfig( const modelInfo = getModelInfoForLlmConfig(
agentModelHandle || "", agentModelHandle || "",
agent.llm_config as unknown as { agent.llm_config as unknown as {
reasoning_effort?: string | null; reasoning_effort?: string | null;
enable_reasoner?: boolean | null; enable_reasoner?: boolean | null;
}, },
); );
if (modelInfo) { if (modelInfo) {
setCurrentModelId(modelInfo.id); setCurrentModelId(modelInfo.id);
} else { } else {
// Model not in models.json (e.g., BYOK model) - use handle as ID // Model not in models.json (e.g., BYOK model) - use handle as ID
setCurrentModelId(agentModelHandle || null); setCurrentModelId(agentModelHandle || null);
}
setCurrentModelHandle(agentModelHandle || null);
} }
// Also update agent state if other fields changed // Always keep base agent state fresh.
setAgentState(agent); setAgentState(agent);
setAgentDescription(agent.description ?? null); setAgentDescription(agent.description ?? null);
const lastRunCompletion = ( const lastRunCompletion = (
@@ -5665,6 +5806,7 @@ export default function App({
const reasoningCycleLastConfirmedAgentStateRef = useRef<AgentState | null>( const reasoningCycleLastConfirmedAgentStateRef = useRef<AgentState | null>(
null, null,
); );
const reasoningCyclePatchedAgentStateRef = useRef(false);
const resetPendingReasoningCycle = useCallback(() => { const resetPendingReasoningCycle = useCallback(() => {
if (reasoningCycleTimerRef.current) { if (reasoningCycleTimerRef.current) {
@@ -5674,6 +5816,7 @@ export default function App({
reasoningCycleDesiredRef.current = null; reasoningCycleDesiredRef.current = null;
reasoningCycleLastConfirmedRef.current = null; reasoningCycleLastConfirmedRef.current = null;
reasoningCycleLastConfirmedAgentStateRef.current = null; reasoningCycleLastConfirmedAgentStateRef.current = null;
reasoningCyclePatchedAgentStateRef.current = false;
}, []); }, []);
const handleAgentSelect = useCallback( const handleAgentSelect = useCallback(
@@ -10655,33 +10798,42 @@ ${SYSTEM_REMINDER_CLOSE}
phase: "running", phase: "running",
}); });
const { updateAgentLLMConfig } = await import("../agent/modify"); const { updateConversationLLMConfig } = await import(
const updatedAgent = await updateAgentLLMConfig( "../agent/modify"
agentId, );
const updatedConversation = await updateConversationLLMConfig(
conversationIdRef.current,
modelHandle, modelHandle,
model.updateArgs, model.updateArgs,
); );
// The API may not echo reasoning_effort back in llm_config or model_settings.effort, const conversationModelSettings = (
// so populate it from model.updateArgs as a reliable fallback. updatedConversation as {
model_settings?: AgentState["model_settings"] | null;
}
).model_settings;
// The API may not echo reasoning_effort back, so populate it from
// model.updateArgs as a reliable fallback.
const rawEffort = modelUpdateArgs?.reasoning_effort; const rawEffort = modelUpdateArgs?.reasoning_effort;
const resolvedReasoningEffort =
typeof rawEffort === "string"
? rawEffort
: (deriveReasoningEffort(
conversationModelSettings,
llmConfigRef.current,
) ?? null);
setHasConversationModelOverride(true);
setLlmConfig({ setLlmConfig({
...updatedAgent.llm_config, ...(llmConfigRef.current ?? ({} as LlmConfig)),
...(typeof rawEffort === "string" ...mapHandleToLlmConfigPatch(modelHandle),
? { reasoning_effort: rawEffort as ModelReasoningEffort } ...(typeof resolvedReasoningEffort === "string"
? {
reasoning_effort:
resolvedReasoningEffort as ModelReasoningEffort,
}
: {}), : {}),
}); });
// Refresh agentState so model_settings (canonical reasoning effort source) is current.
// Include `model` so currentModelLabel (and the status bar) updates immediately.
setAgentState((prev) =>
prev
? {
...prev,
model: updatedAgent.model,
llm_config: updatedAgent.llm_config,
model_settings: updatedAgent.model_settings,
}
: updatedAgent,
);
setCurrentModelId(modelId); setCurrentModelId(modelId);
// Reset context token tracking since different models have different tokenizers // Reset context token tracking since different models have different tokenizers
@@ -11344,38 +11496,42 @@ ${SYSTEM_REMINDER_CLOSE}
const cmd = commandRunner.start("/reasoning", "Setting reasoning..."); const cmd = commandRunner.start("/reasoning", "Setting reasoning...");
try { try {
const { updateAgentLLMConfig } = await import("../agent/modify"); const { updateConversationLLMConfig } = await import(
const updatedAgent = await updateAgentLLMConfig( "../agent/modify"
agentId, );
const updatedConversation = await updateConversationLLMConfig(
conversationIdRef.current,
desired.modelHandle, desired.modelHandle,
{ {
reasoning_effort: desired.effort, reasoning_effort: desired.effort,
}, },
); );
const conversationModelSettings = (
updatedConversation as {
model_settings?: AgentState["model_settings"] | null;
}
).model_settings;
const resolvedReasoningEffort =
deriveReasoningEffort(
conversationModelSettings,
llmConfigRef.current,
) ?? desired.effort;
// The API may not echo reasoning_effort back; populate from desired.effort. setHasConversationModelOverride(true);
// The API may not echo reasoning_effort back; preserve explicit desired effort.
setLlmConfig({ setLlmConfig({
...updatedAgent.llm_config, ...(llmConfigRef.current ?? ({} as LlmConfig)),
reasoning_effort: desired.effort as ModelReasoningEffort, ...mapHandleToLlmConfigPatch(desired.modelHandle),
reasoning_effort: resolvedReasoningEffort as ModelReasoningEffort,
}); });
// Refresh agentState so model_settings (canonical reasoning effort source) is current.
// Include `model` so currentModelLabel (and the status bar) updates immediately.
setAgentState((prev) =>
prev
? {
...prev,
model: updatedAgent.model,
llm_config: updatedAgent.llm_config,
model_settings: updatedAgent.model_settings,
}
: updatedAgent,
);
setCurrentModelId(desired.modelId); setCurrentModelId(desired.modelId);
setCurrentModelHandle(desired.modelHandle);
// Clear pending state. // Clear pending state.
reasoningCycleDesiredRef.current = null; reasoningCycleDesiredRef.current = null;
reasoningCycleLastConfirmedRef.current = null; reasoningCycleLastConfirmedRef.current = null;
reasoningCycleLastConfirmedAgentStateRef.current = null; reasoningCycleLastConfirmedAgentStateRef.current = null;
reasoningCyclePatchedAgentStateRef.current = false;
const display = const display =
desired.effort === "medium" desired.effort === "medium"
@@ -11395,10 +11551,14 @@ ${SYSTEM_REMINDER_CLOSE}
reasoningCycleLastConfirmedRef.current = null; reasoningCycleLastConfirmedRef.current = null;
setLlmConfig(prev); setLlmConfig(prev);
// Also revert the agentState optimistic patch // Also revert the agentState optimistic patch
if (reasoningCycleLastConfirmedAgentStateRef.current) { if (
reasoningCyclePatchedAgentStateRef.current &&
reasoningCycleLastConfirmedAgentStateRef.current
) {
setAgentState(reasoningCycleLastConfirmedAgentStateRef.current); setAgentState(reasoningCycleLastConfirmedAgentStateRef.current);
reasoningCycleLastConfirmedAgentStateRef.current = null; reasoningCycleLastConfirmedAgentStateRef.current = null;
} }
reasoningCyclePatchedAgentStateRef.current = false;
const { getModelInfo } = await import("../agent/model"); const { getModelInfo } = await import("../agent/model");
const modelHandle = const modelHandle =
@@ -11438,10 +11598,12 @@ ${SYSTEM_REMINDER_CLOSE}
: current?.model; : current?.model;
if (!modelHandle) return; if (!modelHandle) return;
// Derive current effort from agentState.model_settings (canonical) with llmConfig fallback // Derive current effort from effective model settings (conversation override aware)
const modelSettingsForEffort = hasConversationModelOverrideRef.current
? undefined
: agentStateRef.current?.model_settings;
const currentEffort = const currentEffort =
deriveReasoningEffort(agentStateRef.current?.model_settings, current) ?? deriveReasoningEffort(modelSettingsForEffort, current) ?? "none";
"none";
const { models } = await import("../agent/model"); const { models } = await import("../agent/model");
const tiers = models const tiers = models
@@ -11476,54 +11638,61 @@ ${SYSTEM_REMINDER_CLOSE}
if (!reasoningCycleLastConfirmedRef.current) { if (!reasoningCycleLastConfirmedRef.current) {
reasoningCycleLastConfirmedRef.current = current ?? null; reasoningCycleLastConfirmedRef.current = current ?? null;
reasoningCycleLastConfirmedAgentStateRef.current = reasoningCycleLastConfirmedAgentStateRef.current =
agentStateRef.current ?? null; hasConversationModelOverrideRef.current
? null
: (agentStateRef.current ?? null);
} }
// Optimistic UI update (footer changes immediately). // Optimistic UI update (footer changes immediately).
setLlmConfig((prev) => setLlmConfig((prev) =>
prev ? ({ ...prev, reasoning_effort: next.effort } as LlmConfig) : prev, prev ? ({ ...prev, reasoning_effort: next.effort } as LlmConfig) : prev,
); );
// Also patch agentState.model_settings for OpenAI/Anthropic/Bedrock so the footer // Patch agentState.model_settings only when operating on agent defaults.
// (which prefers model_settings) reflects the change without waiting for the server. if (!hasConversationModelOverrideRef.current) {
setAgentState((prev) => { reasoningCyclePatchedAgentStateRef.current = true;
if (!prev) return prev ?? null; setAgentState((prev) => {
const ms = prev.model_settings; if (!prev) return prev ?? null;
if (!ms || !("provider_type" in ms)) return prev; const ms = prev.model_settings;
if (ms.provider_type === "openai") { if (!ms || !("provider_type" in ms)) return prev;
return { if (ms.provider_type === "openai") {
...prev, return {
model_settings: { ...prev,
...ms, model_settings: {
reasoning: { ...ms,
...(ms as { reasoning?: Record<string, unknown> }).reasoning, reasoning: {
reasoning_effort: next.effort as ...(ms as { reasoning?: Record<string, unknown> }).reasoning,
| "none" reasoning_effort: next.effort as
| "minimal" | "none"
| "low" | "minimal"
| "medium" | "low"
| "high" | "medium"
| "xhigh", | "high"
| "xhigh",
},
}, },
}, } as AgentState;
} as AgentState; }
} if (
if ( ms.provider_type === "anthropic" ||
ms.provider_type === "anthropic" || ms.provider_type === "bedrock"
ms.provider_type === "bedrock" ) {
) { // Map "xhigh" → "max": footer derivation only recognizes "max" for Anthropic effort.
// Map "xhigh" → "max": footer derivation only recognizes "max" for Anthropic effort. // Cast needed: "max" is valid on the backend but not yet in the SDK type.
// Cast needed: "max" is valid on the backend but not yet in the SDK type. const anthropicEffort =
const anthropicEffort = next.effort === "xhigh" ? "max" : next.effort; next.effort === "xhigh" ? "max" : next.effort;
return { return {
...prev, ...prev,
model_settings: { model_settings: {
...ms, ...ms,
effort: anthropicEffort as "low" | "medium" | "high" | "max", effort: anthropicEffort as "low" | "medium" | "high" | "max",
}, },
} as AgentState; } as AgentState;
} }
return prev; return prev;
}); });
} else {
reasoningCyclePatchedAgentStateRef.current = false;
}
setCurrentModelId(next.id); setCurrentModelId(next.id);
// Debounce the server update. // Debounce the server update.

View File

@@ -41,6 +41,54 @@ describe("model preset refresh wiring", () => {
); );
}); });
test("modify.ts exposes conversation-scoped model updater", () => {
  // Slice modify.ts down to just the conversation-scoped updater's body.
  const modifyPath = fileURLToPath(
    new URL("../../agent/modify.ts", import.meta.url),
  );
  const modifySource = readFileSync(modifyPath, "utf-8");
  const segmentStart = modifySource.indexOf(
    "export async function updateConversationLLMConfig(",
  );
  const segmentEnd = modifySource.indexOf(
    "export interface SystemPromptUpdateResult",
    segmentStart,
  );
  expect(segmentStart).toBeGreaterThanOrEqual(0);
  expect(segmentEnd).toBeGreaterThan(segmentStart);
  const updateSegment = modifySource.slice(segmentStart, segmentEnd);
  // The updater must target the conversation endpoint, never the agent one.
  expect(updateSegment).toContain(
    "buildModelSettings(modelHandle, updateArgs)",
  );
  expect(updateSegment).toContain(
    "Parameters<typeof client.conversations.update>[1]",
  );
  expect(updateSegment).toContain(
    "client.conversations.update(conversationId, payload)",
  );
  expect(updateSegment).toContain("model: modelHandle");
  expect(updateSegment).not.toContain("client.agents.update(");
});
test("/model handler updates conversation model (not agent model)", () => {
  // Slice App.tsx down to just the /model selection handler.
  const appPath = fileURLToPath(new URL("../../cli/App.tsx", import.meta.url));
  const appSource = readFileSync(appPath, "utf-8");
  const handlerStart = appSource.indexOf(
    "const handleModelSelect = useCallback(",
  );
  const handlerEnd = appSource.indexOf(
    "const handleSystemPromptSelect = useCallback(",
    handlerStart,
  );
  expect(handlerStart).toBeGreaterThanOrEqual(0);
  expect(handlerEnd).toBeGreaterThan(handlerStart);
  const handlerSegment = appSource.slice(handlerStart, handlerEnd);
  // The handler must go through the conversation-scoped updater only.
  expect(handlerSegment).toContain("updateConversationLLMConfig(");
  expect(handlerSegment).toContain("conversationIdRef.current");
  expect(handlerSegment).not.toContain("updateAgentLLMConfig(");
});
test("interactive resume flow refreshes model preset without explicit --model", () => { test("interactive resume flow refreshes model preset without explicit --model", () => {
const path = fileURLToPath(new URL("../../index.ts", import.meta.url)); const path = fileURLToPath(new URL("../../index.ts", import.meta.url));
const source = readFileSync(path, "utf-8"); const source = readFileSync(path, "utf-8");

View File

@@ -46,6 +46,28 @@ describe("reasoning tier cycle wiring", () => {
expect(callbackBlocks.length).toBeGreaterThanOrEqual(2); expect(callbackBlocks.length).toBeGreaterThanOrEqual(2);
}); });
test("flush uses conversation-scoped reasoning updates", () => {
  // Slice App.tsx down to just the debounced reasoning-effort flush.
  const appPath = fileURLToPath(
    new URL("../../cli/App.tsx", import.meta.url),
  );
  const appSource = readFileSync(appPath, "utf-8");
  const flushStart = appSource.indexOf(
    "const flushPendingReasoningEffort = useCallback(",
  );
  const flushEnd = appSource.indexOf(
    "const handleCycleReasoningEffort = useCallback(",
    flushStart,
  );
  expect(flushStart).toBeGreaterThanOrEqual(0);
  expect(flushEnd).toBeGreaterThan(flushStart);
  const flushSegment = appSource.slice(flushStart, flushEnd);
  // Reasoning flushes must hit the conversation, not the agent.
  expect(flushSegment).toContain("updateConversationLLMConfig(");
  expect(flushSegment).toContain("conversationIdRef.current");
  expect(flushSegment).not.toContain("updateAgentLLMConfig(");
});
test("tab-based reasoning cycling is opt-in only", () => { test("tab-based reasoning cycling is opt-in only", () => {
const appPath = fileURLToPath( const appPath = fileURLToPath(
new URL("../../cli/App.tsx", import.meta.url), new URL("../../cli/App.tsx", import.meta.url),