feat: letta code
This commit is contained in:
355
src/cli/helpers/accumulator.ts
Normal file
355
src/cli/helpers/accumulator.ts
Normal file
@@ -0,0 +1,355 @@
|
||||
// src/cli/accumulator.ts
|
||||
// Minimal, token-aware accumulator for Letta streams.
|
||||
// - Single transcript via { order[], byId: Map }.
|
||||
// - Tool calls update in-place (same toolCallId for call+return).
|
||||
// - Exposes `onChunk` to feed SDK events and `toLines` to render.
|
||||
|
||||
import type { Letta } from "@letta-ai/letta-client";
|
||||
|
||||
// One Line per transcript row. Tool calls evolve in-place: a tool_return is
// merged into the tool_call row that shares its toolCallId, rather than
// creating a second row.
export type Line =
  | { kind: "user"; id: string; text: string }
  | {
      kind: "reasoning";
      id: string;
      text: string;
      // "streaming" while deltas are still arriving; "finished" afterwards.
      phase: "streaming" | "finished";
    }
  | {
      kind: "assistant";
      id: string;
      text: string;
      phase: "streaming" | "finished";
    }
  | {
      kind: "tool_call";
      id: string;
      // From the tool call object.
      // toolCallId and name should come in the very first chunk.
      toolCallId?: string;
      name?: string;
      argsText?: string;
      // From the tool return object (merged in by toolCallId).
      resultText?: string;
      resultOk?: boolean;
      // State that's useful for rendering; "ready" is used for approval requests.
      phase: "streaming" | "ready" | "running" | "finished";
    }
  | { kind: "error"; id: string; text: string }
  | {
      kind: "command";
      id: string;
      input: string;
      output: string;
      phase?: "running" | "finished";
      success?: boolean;
    };

// Top-level state object for all streaming events.
export type Buffers = {
  // Incremented by delta length (characters) in onChunk — a rough proxy, not
  // real model tokens.
  tokenCount: number;
  // Line ids in display order; byId is the single source of truth for content.
  order: string[];
  byId: Map<string, Line>;
  pendingToolByRun: Map<string, string>; // temporary id per run until real id
  toolCallIdToLineId: Map<string, string>; // toolCallId -> line id (otid)
  lastOtid: string | null; // Track the last otid to detect transitions
  pendingRefresh?: boolean; // Track throttled refresh state
};
|
||||
|
||||
export function createBuffers(): Buffers {
|
||||
return {
|
||||
tokenCount: 0,
|
||||
order: [],
|
||||
byId: new Map(),
|
||||
pendingToolByRun: new Map(),
|
||||
toolCallIdToLineId: new Map(),
|
||||
lastOtid: null,
|
||||
};
|
||||
}
|
||||
|
||||
// Guarantees that there's only one line per ID
|
||||
// If byId already has that id, returns the Line (for mutation)
|
||||
// If not, makes a new line and adds it
|
||||
function ensure<T extends Line>(b: Buffers, id: string, make: () => T): T {
|
||||
const existing = b.byId.get(id) as T | undefined;
|
||||
if (existing) return existing;
|
||||
const created = make();
|
||||
b.byId.set(id, created);
|
||||
b.order.push(id);
|
||||
return created;
|
||||
}
|
||||
|
||||
// Mark a line as finished if it has a phase (immutable update)
|
||||
function markAsFinished(b: Buffers, id: string) {
|
||||
const line = b.byId.get(id);
|
||||
// console.log(`[MARK_FINISHED] Called for ${id}, line exists: ${!!line}, kind: ${line?.kind}, phase: ${(line as any)?.phase}`);
|
||||
if (line && "phase" in line && line.phase === "streaming") {
|
||||
const updatedLine = { ...line, phase: "finished" as const };
|
||||
b.byId.set(id, updatedLine);
|
||||
// console.log(`[MARK_FINISHED] Successfully marked ${id} as finished`);
|
||||
} else {
|
||||
// console.log(`[MARK_FINISHED] Did NOT mark ${id} as finished (conditions not met)`);
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to mark previous otid's line as finished when transitioning to new otid
|
||||
function handleOtidTransition(b: Buffers, newOtid: string | undefined) {
|
||||
// console.log(`[OTID_TRANSITION] Called with newOtid=${newOtid}, lastOtid=${b.lastOtid}`);
|
||||
|
||||
// If transitioning to a different otid (including null/undefined), finish only assistant/reasoning lines.
|
||||
// Tool calls should finish exclusively when a tool_return arrives (merged by toolCallId).
|
||||
if (b.lastOtid && b.lastOtid !== newOtid) {
|
||||
const prev = b.byId.get(b.lastOtid);
|
||||
// console.log(`[OTID_TRANSITION] Found prev line: kind=${prev?.kind}, phase=${(prev as any)?.phase}`);
|
||||
if (prev && (prev.kind === "assistant" || prev.kind === "reasoning")) {
|
||||
// console.log(`[OTID_TRANSITION] Marking ${b.lastOtid} as finished (was ${(prev as any).phase})`);
|
||||
markAsFinished(b, b.lastOtid);
|
||||
}
|
||||
}
|
||||
|
||||
// Update last otid (can be null)
|
||||
b.lastOtid = newOtid ?? null;
|
||||
// console.log(`[OTID_TRANSITION] Updated lastOtid to ${b.lastOtid}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark the current (last) line as finished when the stream ends.
|
||||
* Call this after stream completion to ensure the final line isn't stuck in "streaming" state.
|
||||
*/
|
||||
export function markCurrentLineAsFinished(b: Buffers) {
|
||||
// console.log(`[MARK_CURRENT_FINISHED] Called with lastOtid=${b.lastOtid}`);
|
||||
if (!b.lastOtid) {
|
||||
// console.log(`[MARK_CURRENT_FINISHED] No lastOtid, returning`);
|
||||
return;
|
||||
}
|
||||
// Try both the plain otid and the -tool suffix (in case of collision workaround)
|
||||
const prev = b.byId.get(b.lastOtid) || b.byId.get(`${b.lastOtid}-tool`);
|
||||
// console.log(`[MARK_CURRENT_FINISHED] Found line: kind=${prev?.kind}, phase=${(prev as any)?.phase}`);
|
||||
if (prev && (prev.kind === "assistant" || prev.kind === "reasoning")) {
|
||||
// console.log(`[MARK_CURRENT_FINISHED] Marking ${b.lastOtid} as finished`);
|
||||
markAsFinished(b, b.lastOtid);
|
||||
} else {
|
||||
// console.log(`[MARK_CURRENT_FINISHED] Not marking (not assistant/reasoning or doesn't exist)`);
|
||||
}
|
||||
}
|
||||
|
||||
type ToolCallLine = Extract<Line, { kind: "tool_call" }>;
|
||||
|
||||
// Flatten common SDK "parts" → text
|
||||
function isRecord(v: unknown): v is Record<string, unknown> {
|
||||
return v !== null && typeof v === "object";
|
||||
}
|
||||
function getStringProp(obj: Record<string, unknown>, key: string) {
|
||||
const v = obj[key];
|
||||
return typeof v === "string" ? v : undefined;
|
||||
}
|
||||
function extractTextPart(v: unknown): string {
|
||||
if (typeof v === "string") return v;
|
||||
if (Array.isArray(v)) {
|
||||
return v
|
||||
.map((p) => (isRecord(p) ? (getStringProp(p, "text") ?? "") : ""))
|
||||
.join("");
|
||||
}
|
||||
if (isRecord(v)) {
|
||||
return getStringProp(v, "text") ?? getStringProp(v, "delta") ?? "";
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
// Feed one SDK chunk; mutate buffers in place.
// Dispatches on chunk.messageType:
//  - reasoning/assistant: append the text delta to the line keyed by otid.
//  - tool_call/approval_request: create or update a tool_call line (with a
//    workaround for a backend otid bug — see below).
//  - tool_return: merge the result into the originating tool_call line.
// Unknown message types (ping/usage/etc) are ignored.
export function onChunk(
  b: Buffers,
  chunk: Letta.agents.LettaStreamingResponse,
) {
  switch (chunk.messageType) {
    case "reasoning_message": {
      // Chunks without an otid cannot be correlated to a line; drop them.
      const id = chunk.otid;
      if (!id) {
        break;
      }

      // Handle otid transition (mark previous line as finished)
      handleOtidTransition(b, id);

      const delta = chunk.reasoning;
      const line = ensure(b, id, () => ({
        kind: "reasoning",
        id,
        text: "",
        phase: "streaming",
      }));
      if (delta) {
        // Immutable update: create new object with updated text
        const updatedLine = { ...line, text: line.text + delta };
        b.byId.set(id, updatedLine);
        // tokenCount is a character count used as a rough progress proxy.
        b.tokenCount += delta.length;
      }
      break;
    }

    case "assistant_message": {
      const id = chunk.otid;
      if (!id) break;

      // Handle otid transition (mark previous line as finished)
      handleOtidTransition(b, id);

      const delta = extractTextPart(chunk.content); // NOTE: may be list of parts
      const line = ensure(b, id, () => ({
        kind: "assistant",
        id,
        text: "",
        phase: "streaming",
      }));
      if (delta) {
        // Immutable update: create new object with updated text
        const updatedLine = { ...line, text: line.text + delta };
        b.byId.set(id, updatedLine);
        b.tokenCount += delta.length;
      }
      break;
    }

    case "tool_call_message":
    case "approval_request_message": {
      /* POST-FIX VERSION (what this should look like after backend fix):
      const id = chunk.otid;

      // Handle otid transition (mark previous line as finished)
      handleOtidTransition(b, id);

      if (!id) break;

      const toolCallId = chunk.toolCall?.toolCallId;
      const name = chunk.toolCall?.name;
      const argsText = chunk.toolCall?.arguments;

      // Record correlation: toolCallId → line id (otid)
      if (toolCallId) b.toolCallIdToLineId.set(toolCallId, id);
      */

      let id = chunk.otid;

      const toolCallId = chunk.toolCall?.toolCallId;
      const name = chunk.toolCall?.name;
      const argsText = chunk.toolCall?.arguments;

      // ========== START BACKEND BUG WORKAROUND (Remove after OTID fix) ==========
      // Bug: Backend sends same otid for reasoning and tool_call, and multiple otids for same tool_call

      // Check if we already have a line for this toolCallId (prevents duplicates)
      if (toolCallId && b.toolCallIdToLineId.has(toolCallId)) {
        // Update the existing line instead of creating a new one
        const existingId = b.toolCallIdToLineId.get(toolCallId);
        if (existingId) {
          id = existingId;
        }

        // Handle otid transition for tracking purposes
        handleOtidTransition(b, chunk.otid);
      } else {
        // Check if this otid is already used by a reasoning line
        if (id && b.byId.has(id)) {
          const existing = b.byId.get(id);
          if (existing && existing.kind === "reasoning") {
            // Mark the reasoning as finished before we create the tool_call
            markAsFinished(b, id);
            // Use a different ID for the tool_call to avoid overwriting the reasoning
            id = `${id}-tool`;
          }
        }
        // ========== END BACKEND BUG WORKAROUND ==========

        // This part stays after fix:
        // Handle otid transition (mark previous line as finished)
        // This must happen BEFORE the break, so reasoning gets finished even when tool has no otid
        handleOtidTransition(b, id);

        if (!id) {
          break;
        }

        // Record correlation: toolCallId → line id (otid) for future updates
        if (toolCallId) b.toolCallIdToLineId.set(toolCallId, id);
      }

      // Approval requests surface as "ready" (awaiting user approval);
      // plain tool calls start out "streaming".
      const desiredPhase =
        chunk.messageType === "approval_request_message"
          ? "ready"
          : "streaming";
      const line = ensure<ToolCallLine>(b, id, () => ({
        kind: "tool_call",
        id,
        toolCallId: toolCallId,
        name: name,
        phase: desiredPhase,
      }));

      // If this is an approval request and the line already exists, bump phase to ready
      if (
        chunk.messageType === "approval_request_message" &&
        line.phase !== "finished"
      ) {
        b.byId.set(id, { ...line, phase: "ready" });
      }

      // if argsText is not empty, add it to the line (immutable update)
      // NOTE(review): this spreads the stale `line` captured before the phase
      // bump above, so an approval chunk that also carries argsText appears to
      // revert the phase back to `line.phase` — confirm whether intended.
      if (argsText !== undefined) {
        const updatedLine = {
          ...line,
          argsText: (line.argsText ?? "") + argsText,
        };
        b.byId.set(id, updatedLine);
      }
      break;
    }

    case "tool_return_message": {
      // Tool return is a special case
      // It will have a different otid than the tool call, but we want to merge into the tool call
      const toolCallId = chunk.toolCallId;
      const resultText = chunk.toolReturn;
      const status = chunk.status;

      // Look up the line by toolCallId
      // Keep a mapping of toolCallId to line id (otid)
      const id = toolCallId ? b.toolCallIdToLineId.get(toolCallId) : undefined;
      if (!id) break;

      // Should already exist from the tool_call chunk; ensure is defensive.
      const line = ensure<ToolCallLine>(b, id, () => ({
        kind: "tool_call",
        id,
        phase: "finished",
      }));

      // Immutable update: create new object with result
      const updatedLine = {
        ...line,
        resultText,
        phase: "finished" as const,
        resultOk: status === "success",
      };
      b.byId.set(id, updatedLine);
      break;
    }

    default:
      break; // ignore ping/usage/etc
  }
}
|
||||
|
||||
// Derive a flat transcript
|
||||
export function toLines(b: Buffers): Line[] {
|
||||
const out: Line[] = [];
|
||||
for (const id of b.order) {
|
||||
const line = b.byId.get(id);
|
||||
if (line) out.push(line);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
157
src/cli/helpers/backfill.ts
Normal file
157
src/cli/helpers/backfill.ts
Normal file
@@ -0,0 +1,157 @@
|
||||
import type { Letta } from "@letta-ai/letta-client";
|
||||
import type { Buffers } from "./accumulator";
|
||||
|
||||
// const PASTE_LINE_THRESHOLD = 5;
|
||||
// const PASTE_CHAR_THRESHOLD = 500;
|
||||
const CLIP_CHAR_LIMIT_TEXT = 500;
|
||||
// const CLIP_CHAR_LIMIT_JSON = 1000;
|
||||
|
||||
// function countLines(text: string): number {
|
||||
// return (text.match(/\r\n|\r|\n/g) || []).length + 1;
|
||||
// }
|
||||
|
||||
function clip(s: string, limit: number): string {
|
||||
if (!s) return "";
|
||||
return s.length > limit ? `${s.slice(0, limit)}…` : s;
|
||||
}
|
||||
|
||||
function renderAssistantContentParts(
|
||||
parts: Letta.AssistantMessageContent,
|
||||
): string {
|
||||
// AssistantContent can be a string or an array of text parts
|
||||
if (typeof parts === "string") return parts;
|
||||
let out = "";
|
||||
for (const p of parts) {
|
||||
if (p.type === "text") {
|
||||
out += p.text || "";
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
function renderUserContentParts(parts: Letta.UserMessageContent): string {
|
||||
// UserContent can be a string or an array of text OR image parts
|
||||
// for text parts, we clip them if they're too big (eg copy-pasted chunks)
|
||||
// for image parts, we just show a placeholder
|
||||
if (typeof parts === "string") return parts;
|
||||
|
||||
let out = "";
|
||||
for (const p of parts) {
|
||||
if (p.type === "text") {
|
||||
const text = p.text || "";
|
||||
out += clip(text, CLIP_CHAR_LIMIT_TEXT);
|
||||
} else if (p.type === "image") {
|
||||
out += `[Image]`;
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
/**
 * Rebuild the transcript buffers from a full message history.
 *
 * Mirrors what streaming (`onChunk`) produces, but with each line already in
 * a terminal phase. Safe to call multiple times: all per-transcript state is
 * cleared first. Message types other than user / reasoning / assistant /
 * tool call / tool return are ignored.
 */
export function backfillBuffers(
  buffers: Buffers,
  history: Letta.LettaMessageUnion[],
): void {
  // Clear buffers to ensure idempotency (in case this is called multiple times)
  buffers.order = [];
  buffers.byId.clear();
  buffers.toolCallIdToLineId.clear();
  buffers.pendingToolByRun.clear();
  buffers.lastOtid = null;
  // Note: we don't reset tokenCount here (it resets per-turn in onSubmit)

  // Iterate over the history and add the messages to the buffers
  // Want to add user, reasoning, assistant, tool call + tool return
  for (const msg of history) {
    // Use otid as line ID when available (like streaming does), fall back to msg.id
    const lineId = "otid" in msg && msg.otid ? msg.otid : msg.id;

    switch (msg.messageType) {
      // user message - content parts may include text and image parts
      case "user_message": {
        // `exists` guards order: re-setting byId is fine, but the id must not
        // be pushed into order twice.
        const exists = buffers.byId.has(lineId);
        buffers.byId.set(lineId, {
          kind: "user",
          id: lineId,
          text: renderUserContentParts(msg.content),
        });
        if (!exists) buffers.order.push(lineId);
        break;
      }

      // reasoning message - historical, so it is already "finished"
      case "reasoning_message": {
        const exists = buffers.byId.has(lineId);
        buffers.byId.set(lineId, {
          kind: "reasoning",
          id: lineId,
          text: msg.reasoning,
          phase: "finished",
        });
        if (!exists) buffers.order.push(lineId);
        break;
      }

      // assistant message - content parts may include text and image parts
      case "assistant_message": {
        const exists = buffers.byId.has(lineId);
        buffers.byId.set(lineId, {
          kind: "assistant",
          id: lineId,
          text: renderAssistantContentParts(msg.content),
          phase: "finished",
        });
        if (!exists) buffers.order.push(lineId);
        break;
      }

      // tool call message OR approval request (they're the same in history)
      case "tool_call_message":
      case "approval_request_message": {
        // Skip calls without a toolCallId: the tool return could never be
        // merged back into them.
        if ("toolCall" in msg && msg.toolCall?.toolCallId) {
          const toolCall = msg.toolCall;
          const toolCallId = toolCall.toolCallId;
          const exists = buffers.byId.has(lineId);

          // NOTE(review): phase starts at "ready" here; a matching
          // tool_return below flips it to "finished". A historical call with
          // no return stays "ready" — confirm that renders as intended.
          buffers.byId.set(lineId, {
            kind: "tool_call",
            id: lineId,
            toolCallId: toolCallId,
            name: toolCall.name,
            argsText: toolCall.arguments,
            phase: "ready",
          });
          if (!exists) buffers.order.push(lineId);

          // Maintain mapping for tool return to find this line
          buffers.toolCallIdToLineId.set(toolCallId, lineId);
        }
        break;
      }

      // tool return message - merge into the existing tool call line
      case "tool_return_message": {
        const toolCallId = msg.toolCallId;
        if (!toolCallId) break;

        // Look up the line using the mapping (like streaming does)
        const toolCallLineId = buffers.toolCallIdToLineId.get(toolCallId);
        if (!toolCallLineId) break;

        const existingLine = buffers.byId.get(toolCallLineId);
        if (!existingLine || existingLine.kind !== "tool_call") break;

        // Update the existing line with the result
        buffers.byId.set(toolCallLineId, {
          ...existingLine,
          resultText: msg.toolReturn,
          resultOk: msg.status === "success",
          phase: "finished",
        });
        break;
      }

      default:
        break; // ignore other message types
    }
  }
}
|
||||
170
src/cli/helpers/clipboard.ts
Normal file
170
src/cli/helpers/clipboard.ts
Normal file
@@ -0,0 +1,170 @@
|
||||
// Clipboard utilities for detecting and importing images from system clipboard
|
||||
import { execFileSync } from "node:child_process";
|
||||
import { existsSync, readFileSync, statSync } from "node:fs";
|
||||
import { basename, extname, isAbsolute, resolve } from "node:path";
|
||||
import { allocateImage } from "./pasteRegistry";
|
||||
|
||||
const IMAGE_EXTS = new Set([
|
||||
".png",
|
||||
".jpg",
|
||||
".jpeg",
|
||||
".gif",
|
||||
".webp",
|
||||
".bmp",
|
||||
".svg",
|
||||
".tif",
|
||||
".tiff",
|
||||
".heic",
|
||||
".heif",
|
||||
".avif",
|
||||
]);
|
||||
|
||||
function countLines(text: string): number {
|
||||
return (text.match(/\r\n|\r|\n/g) || []).length + 1;
|
||||
}
|
||||
|
||||
// Translate various image paste formats into [Image #N] placeholders.
// Handles, in order: (1) iTerm2 OSC 1337 inline file transfers, (2) data: URL
// images, (3) a single pasted image-file path. Each recognized image is
// registered via allocateImage and replaced by its placeholder; anything
// unrecognized passes through unchanged. Each stage is wrapped in try/catch
// so a malformed paste degrades to plain text instead of throwing.
export function translatePasteForImages(paste: string): string {
  let s = paste || "";

  // 1) iTerm2 OSC 1337 inline file transfer: ESC ] 1337;File=...:BASE64 <BEL or ST>
  try {
    // Build regex via code points to avoid control chars in literal
    const ESC = "\u001B";
    const BEL = "\u0007";
    const ST = `${ESC}\\`; // ESC \
    const pattern = `${ESC}]1337;File=([^${BEL}${ESC}]*):([\\s\\S]*?)(?:${BEL}|${ST})`;
    const OSC = new RegExp(pattern, "g");
    s = s.replace(OSC, (_m, paramsStr: string, base64: string) => {
      // File= params are semicolon-separated key=value pairs (name, type, …).
      const params: Record<string, string> = {};
      for (const seg of String(paramsStr || "").split(";")) {
        const [k, v] = seg.split("=");
        if (k && v)
          params[k.trim().toLowerCase()] = decodeURIComponent(v.trim());
      }
      const name = params.name || undefined;
      const mt = params.type || params.mime || "application/octet-stream";
      const id = allocateImage({ data: base64, mediaType: mt, filename: name });
      return `[Image #${id}]`;
    });
  } catch {}

  // 2) Data URL images
  try {
    const DATA_URL = /data:image\/([a-zA-Z0-9.+-]+);base64,([A-Za-z0-9+/=]+)/g;
    s = s.replace(DATA_URL, (_m, subtype: string, b64: string) => {
      const mt = `image/${subtype}`;
      const id = allocateImage({ data: b64, mediaType: mt });
      return `[Image #${id}]`;
    });
  } catch {}

  // 3) Single image file path paste (only considered when the paste is one line)
  try {
    const trimmed = s.trim();
    const singleLine = countLines(trimmed) <= 1;
    if (singleLine) {
      let filePath = trimmed;
      if (/^file:\/\//i.test(filePath)) {
        try {
          // Decode file:// URL
          const u = new URL(filePath);
          filePath = decodeURIComponent(u.pathname);
          // On Windows, pathname starts with /C:/
          if (process.platform === "win32" && /^\/[A-Za-z]:\//.test(filePath)) {
            filePath = filePath.slice(1);
          }
        } catch {}
      }
      // If relative, resolve against CWD
      if (!isAbsolute(filePath)) filePath = resolve(process.cwd(), filePath);
      const ext = extname(filePath || "").toLowerCase();
      if (
        IMAGE_EXTS.has(ext) &&
        existsSync(filePath) &&
        statSync(filePath).isFile()
      ) {
        // Inline the file's bytes as base64 and register it.
        const buf = readFileSync(filePath);
        const b64 = buf.toString("base64");
        // Map the extension to a MIME type (default: octet-stream).
        const mt =
          ext === ".png"
            ? "image/png"
            : ext === ".jpg" || ext === ".jpeg"
              ? "image/jpeg"
              : ext === ".gif"
                ? "image/gif"
                : ext === ".webp"
                  ? "image/webp"
                  : ext === ".bmp"
                    ? "image/bmp"
                    : ext === ".svg"
                      ? "image/svg+xml"
                      : ext === ".tif" || ext === ".tiff"
                        ? "image/tiff"
                        : ext === ".heic"
                          ? "image/heic"
                          : ext === ".heif"
                            ? "image/heif"
                            : ext === ".avif"
                              ? "image/avif"
                              : "application/octet-stream";
        const id = allocateImage({
          data: b64,
          mediaType: mt,
          filename: basename(filePath),
        });
        // The whole paste collapses to the placeholder.
        s = `[Image #${id}]`;
      }
    }
  } catch {}

  return s;
}
|
||||
|
||||
// Attempt to import an image directly from the OS clipboard on macOS via JXA
// (osascript ships with macOS, so no extra dependency). Returns an
// `[Image #N]` placeholder on success, or null on any failure or when not
// running on macOS.
export function tryImportClipboardImageMac(): string | null {
  if (process.platform !== "darwin") return null;
  try {
    // JXA script: probe the general pasteboard for the first matching image
    // UTI and print "<uti>|<base64>" (empty string when no image is present).
    const jxa = `
      ObjC.import('AppKit');
      (function() {
        var pb = $.NSPasteboard.generalPasteboard;
        var types = ['public.png','public.jpeg','public.tiff','public.heic','public.heif','public.bmp','public.gif','public.svg-image'];
        for (var i = 0; i < types.length; i++) {
          var t = types[i];
          var d = pb.dataForType(t);
          if (d) {
            var b64 = d.base64EncodedStringWithOptions(0).js;
            return t + '|' + b64;
          }
        }
        return '';
      })();
    `;
    // stderr is discarded; a failed osascript run throws and returns null below.
    const out = execFileSync("osascript", ["-l", "JavaScript", "-e", jxa], {
      encoding: "utf8",
      stdio: ["ignore", "pipe", "ignore"],
    }).trim();
    if (!out) return null;
    // Output format is "<uti>|<base64>"; idx <= 0 means no/empty UTI.
    const idx = out.indexOf("|");
    if (idx <= 0) return null;
    const uti = out.slice(0, idx);
    const b64 = out.slice(idx + 1);
    if (!b64) return null;
    // Map pasteboard UTIs to MIME types (unknown UTIs default to PNG).
    const map: Record<string, string> = {
      "public.png": "image/png",
      "public.jpeg": "image/jpeg",
      "public.tiff": "image/tiff",
      "public.heic": "image/heic",
      "public.heif": "image/heif",
      "public.bmp": "image/bmp",
      "public.gif": "image/gif",
      "public.svg-image": "image/svg+xml",
    };
    const mediaType = map[uti] || "image/png";
    const id = allocateImage({ data: b64, mediaType });
    return `[Image #${id}]`;
  } catch {
    return null;
  }
}
|
||||
193
src/cli/helpers/diff.ts
Normal file
193
src/cli/helpers/diff.ts
Normal file
@@ -0,0 +1,193 @@
|
||||
import { basename } from "node:path";
|
||||
import * as Diff from "diff";
|
||||
|
||||
// Number of unchanged context lines kept around each hunk.
export const ADV_DIFF_CONTEXT_LINES = 1; // easy to adjust later
export const ADV_DIFF_IGNORE_WHITESPACE = true; // easy to flip later

// Which editing tool produced the change being previewed.
export type AdvancedDiffVariant = "write" | "edit" | "multi_edit";

// A single old_string -> new_string replacement on one file.
export interface AdvancedEditInput {
  kind: "edit";
  filePath: string;
  oldString: string;
  newString: string;
  replaceAll?: boolean; // replace every occurrence instead of just the first
}

// A full-file write (creates the file or replaces its content).
export interface AdvancedWriteInput {
  kind: "write";
  filePath: string;
  content: string;
}

// A sequence of replacements applied in order to one file.
export interface AdvancedMultiEditInput {
  kind: "multi_edit";
  filePath: string;
  edits: Array<{
    old_string: string;
    new_string: string;
    replace_all?: boolean;
  }>;
}

export type AdvancedDiffInput =
  | AdvancedEditInput
  | AdvancedWriteInput
  | AdvancedMultiEditInput;

export interface AdvancedHunkLine {
  raw: string; // original line from structuredPatch (includes prefix)
}

export interface AdvancedHunk {
  oldStart: number;
  newStart: number;
  lines: AdvancedHunkLine[]; // pass through; renderer will compute numbers/word pairs
}

// Diff computed successfully; renderer shows the hunks.
export interface AdvancedDiffSuccess {
  mode: "advanced";
  fileName: string;
  oldStr: string;
  newStr: string;
  hunks: AdvancedHunk[];
}

// Diff could not be computed (e.g. unreadable file); renderer falls back.
export interface AdvancedDiffFallback {
  mode: "fallback";
  reason: string;
}

// The proposed edit does not apply to the file's current content.
export interface AdvancedDiffUnpreviewable {
  mode: "unpreviewable";
  reason: string;
}

export type AdvancedDiffResult =
  | AdvancedDiffSuccess
  | AdvancedDiffFallback
  | AdvancedDiffUnpreviewable;
|
||||
|
||||
function readFileOrNull(p: string): string | null {
|
||||
try {
|
||||
const _file = Bun.file(p);
|
||||
// Note: Bun.file().text() is async, but we need sync for diff preview
|
||||
// Fall back to node:fs for sync reading
|
||||
return require("node:fs").readFileSync(p, "utf-8");
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function applyFirstOccurrence(
|
||||
content: string,
|
||||
oldStr: string,
|
||||
newStr: string,
|
||||
): { ok: true; out: string } | { ok: false; reason: string } {
|
||||
const idx = content.indexOf(oldStr);
|
||||
if (idx === -1) return { ok: false, reason: "old_string not found" };
|
||||
const out =
|
||||
content.slice(0, idx) + newStr + content.slice(idx + oldStr.length);
|
||||
return { ok: true, out };
|
||||
}
|
||||
|
||||
function applyAllOccurrences(
|
||||
content: string,
|
||||
oldStr: string,
|
||||
newStr: string,
|
||||
): { ok: true; out: string } | { ok: false; reason: string } {
|
||||
if (!oldStr) return { ok: false, reason: "old_string empty" };
|
||||
const occurrences = content.split(oldStr).length - 1;
|
||||
if (occurrences === 0) return { ok: false, reason: "old_string not found" };
|
||||
return { ok: true, out: content.split(oldStr).join(newStr) };
|
||||
}
|
||||
|
||||
/**
 * Compute a preview diff for a pending write / edit / multi_edit tool call.
 *
 * Reads the file's current content (or uses `opts.oldStrOverride`), applies
 * the proposed edit(s) in memory only, and returns structured hunks for the
 * renderer. The file on disk is never modified.
 *
 * Result modes:
 * - "advanced": hunks ready to render
 * - "fallback": current content could not be read (non-write edits only)
 * - "unpreviewable": the edit does not apply to the current content
 */
export function computeAdvancedDiff(
  input: AdvancedDiffInput,
  opts?: { oldStrOverride?: string },
): AdvancedDiffResult {
  const fileName = basename(input.filePath || "");

  // Fetch current content (oldStr). For write on new file, treat missing as '' and continue.
  const fileContent =
    opts?.oldStrOverride !== undefined
      ? opts.oldStrOverride
      : readFileOrNull(input.filePath);
  if (fileContent === null && input.kind !== "write") {
    return { mode: "fallback", reason: "File not readable" };
  }

  const oldStr = fileContent ?? "";
  let newStr = oldStr;

  if (input.kind === "write") {
    // Whole-file write: the proposed content replaces everything.
    newStr = input.content;
  } else if (input.kind === "edit") {
    const replaceAll = !!input.replaceAll;
    const applied = replaceAll
      ? applyAllOccurrences(oldStr, input.oldString, input.newString)
      : applyFirstOccurrence(oldStr, input.oldString, input.newString);
    if (!applied.ok) {
      return {
        mode: "unpreviewable",
        reason: `Edit cannot be previewed: ${applied.reason}`,
      };
    }
    newStr = applied.out;
  } else if (input.kind === "multi_edit") {
    // Apply each edit in order against the running result.
    let working = oldStr;
    for (const e of input.edits) {
      const replaceAll = !!e.replace_all;
      if (replaceAll) {
        const occ = working.split(e.old_string).length - 1;
        if (occ === 0)
          return { mode: "unpreviewable", reason: "Edit not found in file" };
        const res = applyAllOccurrences(working, e.old_string, e.new_string);
        if (!res.ok)
          return {
            mode: "unpreviewable",
            reason: `Edit cannot be previewed: ${res.reason}`,
          };
        working = res.out;
      } else {
        // Without replace_all the match must be unique to be previewable.
        const occ = working.split(e.old_string).length - 1;
        if (occ === 0)
          return { mode: "unpreviewable", reason: "Edit not found in file" };
        if (occ > 1)
          return {
            mode: "unpreviewable",
            reason: `Multiple matches (${occ}), replace_all=false`,
          };
        const res = applyFirstOccurrence(working, e.old_string, e.new_string);
        if (!res.ok)
          return {
            mode: "unpreviewable",
            reason: `Edit cannot be previewed: ${res.reason}`,
          };
        working = res.out;
      }
    }
    newStr = working;
  }

  // NOTE(review): ignoreWhitespace=true means whitespace-only edits may yield
  // zero hunks — confirm that is acceptable for previewing such edits.
  const patch = Diff.structuredPatch(
    fileName,
    fileName,
    oldStr,
    newStr,
    "Current",
    "Proposed",
    {
      context: ADV_DIFF_CONTEXT_LINES,
      ignoreWhitespace: ADV_DIFF_IGNORE_WHITESPACE,
    },
  );

  // Pass hunk lines through raw (prefix included); the renderer computes
  // line numbers and word-level pairs itself.
  const hunks: AdvancedHunk[] = patch.hunks.map((h) => ({
    oldStart: h.oldStart,
    newStart: h.newStart,
    lines: h.lines.map((l) => ({ raw: l })),
  }));

  return { mode: "advanced", fileName, oldStr, newStr, hunks };
}
|
||||
69
src/cli/helpers/formatArgsDisplay.ts
Normal file
69
src/cli/helpers/formatArgsDisplay.ts
Normal file
@@ -0,0 +1,69 @@
|
||||
// Utility to format tool argument JSON strings into a concise display label
|
||||
// Copied from old letta-code repo to preserve exact formatting behavior
|
||||
|
||||
// Small helpers
|
||||
const isRecord = (v: unknown): v is Record<string, unknown> =>
|
||||
typeof v === "object" && v !== null;
|
||||
|
||||
export function formatArgsDisplay(argsJson: string): {
|
||||
display: string;
|
||||
parsed: Record<string, unknown>;
|
||||
} {
|
||||
let parsed: Record<string, unknown> = {};
|
||||
let display = "…";
|
||||
try {
|
||||
if (argsJson?.trim()) {
|
||||
const p = JSON.parse(argsJson);
|
||||
if (isRecord(p)) {
|
||||
// Drop noisy keys for display
|
||||
const clone: Record<string, unknown> = { ...p } as Record<
|
||||
string,
|
||||
unknown
|
||||
>;
|
||||
if ("request_heartbeat" in clone) delete clone.request_heartbeat;
|
||||
parsed = clone;
|
||||
const keys = Object.keys(parsed);
|
||||
if (
|
||||
keys.length === 1 &&
|
||||
["query", "path", "file_path", "command", "label"].includes(keys[0])
|
||||
) {
|
||||
const v = parsed[keys[0]];
|
||||
display = typeof v === "string" ? v : String(v);
|
||||
} else {
|
||||
display = Object.entries(parsed)
|
||||
.map(([k, v]) => {
|
||||
if (v === undefined || v === null) return `${k}=${v}`;
|
||||
if (typeof v === "boolean" || typeof v === "number")
|
||||
return `${k}=${v}`;
|
||||
if (typeof v === "string")
|
||||
return v.length > 50 ? `${k}=…` : `${k}="${v}"`;
|
||||
if (Array.isArray(v)) return `${k}=[${v.length} items]`;
|
||||
if (typeof v === "object")
|
||||
return `${k}={${Object.keys(v as Record<string, unknown>).length} props}`;
|
||||
const str = JSON.stringify(v);
|
||||
return str.length > 50 ? `${k}=…` : `${k}=${str}`;
|
||||
})
|
||||
.join(", ");
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Fallback: try to extract common keys without full JSON parse
|
||||
try {
|
||||
const s = argsJson || "";
|
||||
const fp = /"file_path"\s*:\s*"([^"]+)"/.exec(s);
|
||||
const old = /"old_string"\s*:\s*"([\s\S]*?)"\s*(,|\})/.exec(s);
|
||||
const neu = /"new_string"\s*:\s*"([\s\S]*?)"\s*(,|\})/.exec(s);
|
||||
const cont = /"content"\s*:\s*"([\s\S]*?)"\s*(,|\})/.exec(s);
|
||||
const parts: string[] = [];
|
||||
if (fp) parts.push(`file_path="${fp[1]}"`);
|
||||
if (old) parts.push(`old_string=…`);
|
||||
if (neu) parts.push(`new_string=…`);
|
||||
if (cont) parts.push(`content=…`);
|
||||
if (parts.length) display = parts.join(", ");
|
||||
} catch {
|
||||
// If all else fails, use the ellipsis
|
||||
}
|
||||
}
|
||||
return { display, parsed };
|
||||
}
|
||||
162
src/cli/helpers/pasteRegistry.ts
Normal file
162
src/cli/helpers/pasteRegistry.ts
Normal file
@@ -0,0 +1,162 @@
|
||||
// Clipboard paste registry - manages mappings from placeholders to actual content
// Supports both large text pastes and image pastes (multi-modal)

// A pasted image held in memory until it is sent or cleared.
export interface ImageEntry {
  data: string; // base64
  mediaType: string;
  filename?: string;
}

// Text placeholder registry (for large pasted text collapsed into a placeholder)
const textRegistry = new Map<number, string>();

// Image placeholder registry (maps id -> base64 + mediaType)
const imageRegistry = new Map<number, ImageEntry>();

// Shared id counter: text and image placeholders draw from the same sequence.
let nextId = 1;
||||
|
||||
// ---------- Text placeholders ----------
|
||||
|
||||
export function allocatePaste(content: string): number {
|
||||
const id = nextId++;
|
||||
textRegistry.set(id, content);
|
||||
return id;
|
||||
}
|
||||
|
||||
export function resolvePlaceholders(text: string): string {
|
||||
if (!text) return text;
|
||||
return text.replace(
|
||||
/\[Pasted text #(\d+) \+(\d+) lines\]/g,
|
||||
(_match, idStr) => {
|
||||
const id = Number(idStr);
|
||||
const content = textRegistry.get(id);
|
||||
return content !== undefined ? content : _match;
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
export function extractTextPlaceholderIds(text: string): number[] {
|
||||
const ids: number[] = [];
|
||||
if (!text) return ids;
|
||||
const re = /\[Pasted text #(\d+) \+(\d+) lines\]/g;
|
||||
let match: RegExpExecArray | null;
|
||||
// biome-ignore lint/suspicious/noAssignInExpressions: Standard pattern for regex matching
|
||||
while ((match = re.exec(text)) !== null) {
|
||||
const id = Number(match[1]);
|
||||
if (!Number.isNaN(id)) ids.push(id);
|
||||
}
|
||||
return ids;
|
||||
}
|
||||
|
||||
export function hasAnyTextPlaceholders(text: string): boolean {
|
||||
return /\[Pasted text #\d+ \+\d+ lines\]/.test(text || "");
|
||||
}
|
||||
|
||||
// ---------- Image placeholders ----------
|
||||
|
||||
export function allocateImage(args: {
|
||||
data: string;
|
||||
mediaType: string;
|
||||
filename?: string;
|
||||
}): number {
|
||||
const id = nextId++;
|
||||
imageRegistry.set(id, {
|
||||
data: args.data,
|
||||
mediaType: args.mediaType,
|
||||
filename: args.filename,
|
||||
});
|
||||
return id;
|
||||
}
|
||||
|
||||
// Look up a previously registered image by its placeholder id.
export function getImage(id: number): ImageEntry | undefined {
  return imageRegistry.get(id);
}
|
||||
|
||||
export function extractImagePlaceholderIds(text: string): number[] {
|
||||
const ids: number[] = [];
|
||||
if (!text) return ids;
|
||||
const re = /\[Image #(\d+)\]/g;
|
||||
let match: RegExpExecArray | null;
|
||||
// biome-ignore lint/suspicious/noAssignInExpressions: Standard pattern for regex matching
|
||||
while ((match = re.exec(text)) !== null) {
|
||||
const id = Number(match[1]);
|
||||
if (!Number.isNaN(id)) ids.push(id);
|
||||
}
|
||||
return ids;
|
||||
}
|
||||
|
||||
export function hasAnyImagePlaceholders(text: string): boolean {
|
||||
return /\[Image #\d+\]/.test(text || "");
|
||||
}
|
||||
|
||||
// ---------- Cleanup ----------
|
||||
|
||||
export function clearPlaceholdersInText(text: string): void {
|
||||
// Clear text placeholders referenced in this text
|
||||
for (const id of extractTextPlaceholderIds(text)) {
|
||||
if (textRegistry.has(id)) textRegistry.delete(id);
|
||||
}
|
||||
// Clear image placeholders referenced in this text
|
||||
for (const id of extractImagePlaceholderIds(text)) {
|
||||
if (imageRegistry.has(id)) imageRegistry.delete(id);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------- Content Builder ----------

// Convert display text (with placeholders) into Letta content parts
// Text placeholders are resolved; image placeholders become image content

// Inline base64 image payload (mirrors Letta's image source shape).
type Base64ImageSource = { type: "base64"; mediaType: string; data: string };

// One part of an outgoing message: plain text or an inline image.
type ContentPart =
  | { type: "text"; text: string }
  | { type: "image"; source: Base64ImageSource };
|
||||
|
||||
export function buildMessageContentFromDisplay(text: string): ContentPart[] {
|
||||
const parts: ContentPart[] = [];
|
||||
if (!text) return [{ type: "text", text: "" }];
|
||||
|
||||
const re = /\[Image #(\d+)\]/g;
|
||||
let lastIdx = 0;
|
||||
let match: RegExpExecArray | null;
|
||||
|
||||
const pushText = (s: string) => {
|
||||
if (!s) return;
|
||||
const resolved = resolvePlaceholders(s);
|
||||
if (resolved.length === 0) return;
|
||||
const prev = parts[parts.length - 1];
|
||||
if (prev && prev.type === "text") {
|
||||
prev.text = (prev.text || "") + resolved;
|
||||
} else {
|
||||
parts.push({ type: "text", text: resolved });
|
||||
}
|
||||
};
|
||||
|
||||
// biome-ignore lint/suspicious/noAssignInExpressions: Standard pattern for regex matching
|
||||
while ((match = re.exec(text)) !== null) {
|
||||
const start = match.index;
|
||||
const end = start + match[0].length;
|
||||
const before = text.slice(lastIdx, start);
|
||||
pushText(before);
|
||||
const id = Number(match[1]);
|
||||
const img = getImage(id);
|
||||
if (img?.data) {
|
||||
parts.push({
|
||||
type: "image",
|
||||
source: {
|
||||
type: "base64",
|
||||
mediaType: img.mediaType || "image/jpeg",
|
||||
data: img.data,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
// If mapping missing, keep the literal placeholder as text
|
||||
pushText(match[0]);
|
||||
}
|
||||
lastIdx = end;
|
||||
}
|
||||
// Remainder
|
||||
pushText(text.slice(lastIdx));
|
||||
|
||||
if (parts.length === 0) return [{ type: "text", text }];
|
||||
return parts;
|
||||
}
|
||||
25
src/cli/helpers/safeJsonParse.ts
Normal file
25
src/cli/helpers/safeJsonParse.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
/**
|
||||
* Safe JSON parser that never throws
|
||||
* Returns parsed value on success, or null on failure
|
||||
*/
|
||||
export function safeJsonParse<T = unknown>(
|
||||
json: string,
|
||||
): { success: true; data: T } | { success: false; error: string } {
|
||||
try {
|
||||
const data = JSON.parse(json) as T;
|
||||
return { success: true, data };
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Safe JSON parser that returns the parsed value or a default value
|
||||
*/
|
||||
export function safeJsonParseOr<T>(json: string, defaultValue: T): T {
|
||||
const result = safeJsonParse<T>(json);
|
||||
return result.success ? result.data : defaultValue;
|
||||
}
|
||||
104
src/cli/helpers/stream.ts
Normal file
104
src/cli/helpers/stream.ts
Normal file
@@ -0,0 +1,104 @@
|
||||
import { Letta } from "@letta-ai/letta-client";
|
||||
import {
|
||||
type createBuffers,
|
||||
markCurrentLineAsFinished,
|
||||
onChunk,
|
||||
} from "./accumulator";
|
||||
|
||||
// A tool call that paused the run awaiting user approval.
export type ApprovalRequest = {
  toolCallId: string;
  toolName: string;
  toolArgs: string; // accumulated JSON argument string (concatenated deltas)
};

// Outcome of draining one stream to completion (or interruption).
type DrainResult = {
  stopReason: Letta.StopReasonType;
  lastRunId?: string | null;
  lastSeqId?: number | null;
  approval?: ApprovalRequest | null; // present only if we ended due to approval
};
|
||||
|
||||
export async function drainStream(
|
||||
stream: AsyncIterable<Letta.LettaStreamingResponse>,
|
||||
buffers: ReturnType<typeof createBuffers>,
|
||||
refresh: () => void,
|
||||
): Promise<DrainResult> {
|
||||
let approvalRequestId: string | null = null;
|
||||
let toolCallId: string | null = null;
|
||||
let toolName: string | null = null;
|
||||
let toolArgs: string | null = null;
|
||||
|
||||
let stopReason: Letta.StopReasonType | null = null;
|
||||
let lastRunId: string | null = null;
|
||||
let lastSeqId: number | null = null;
|
||||
|
||||
for await (const chunk of stream) {
|
||||
// Store the runId and seqId to re-connect if stream is interrupted
|
||||
if ("runId" in chunk && "seqId" in chunk && chunk.runId && chunk.seqId) {
|
||||
lastRunId = chunk.runId;
|
||||
lastSeqId = chunk.seqId;
|
||||
}
|
||||
|
||||
if (chunk.messageType === "ping") continue;
|
||||
|
||||
// Need to store the approval request ID to send an approval in a new run
|
||||
if (chunk.messageType === "approval_request_message") {
|
||||
approvalRequestId = chunk.id;
|
||||
}
|
||||
|
||||
// NOTE: this this a little ugly - we're basically processing tool name and chunk deltas
|
||||
// in both the onChunk handler and here, we could refactor to instead pull the tool name
|
||||
// and JSON args from the mutated lines (eg last mutated line)
|
||||
if (
|
||||
chunk.messageType === "tool_call_message" ||
|
||||
chunk.messageType === "approval_request_message"
|
||||
) {
|
||||
if (chunk.toolCall?.toolCallId) {
|
||||
toolCallId = chunk.toolCall.toolCallId;
|
||||
}
|
||||
if (chunk.toolCall?.name) {
|
||||
if (toolName) {
|
||||
// TODO would expect that we should allow stacking? I guess not?
|
||||
// toolName = toolName + chunk.toolCall.name;
|
||||
} else {
|
||||
toolName = chunk.toolCall.name;
|
||||
}
|
||||
}
|
||||
if (chunk.toolCall?.arguments) {
|
||||
if (toolArgs) {
|
||||
toolArgs = toolArgs + chunk.toolCall.arguments;
|
||||
} else {
|
||||
toolArgs = chunk.toolCall.arguments;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
onChunk(buffers, chunk);
|
||||
queueMicrotask(refresh);
|
||||
|
||||
if (chunk.messageType === "stop_reason") {
|
||||
stopReason = chunk.stopReason;
|
||||
break; // end of turn
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the final line as finished now that stream has ended
|
||||
markCurrentLineAsFinished(buffers);
|
||||
queueMicrotask(refresh);
|
||||
|
||||
// Package the approval request at the end
|
||||
const approval =
|
||||
toolCallId && toolName && toolArgs && approvalRequestId
|
||||
? {
|
||||
toolCallId: toolCallId,
|
||||
toolName: toolName,
|
||||
toolArgs: toolArgs,
|
||||
}
|
||||
: null;
|
||||
|
||||
if (!stopReason) {
|
||||
stopReason = Letta.StopReasonType.Error;
|
||||
}
|
||||
|
||||
return { stopReason, approval, lastRunId, lastSeqId };
|
||||
}
|
||||
41
src/cli/helpers/thinkingMessages.ts
Normal file
41
src/cli/helpers/thinkingMessages.ts
Normal file
@@ -0,0 +1,41 @@
|
||||
// Machine god AI themed thinking messages
|
||||
const THINKING_MESSAGES = [
|
||||
"Thinking",
|
||||
"Processing",
|
||||
"Computing",
|
||||
"Calculating",
|
||||
"Analyzing",
|
||||
"Synthesizing",
|
||||
"Deliberating",
|
||||
"Cogitating",
|
||||
"Reflecting",
|
||||
"Reasoning",
|
||||
"Spinning",
|
||||
"Focusing",
|
||||
"Machinating",
|
||||
"Contemplating",
|
||||
"Ruminating",
|
||||
"Considering",
|
||||
"Pondering",
|
||||
"Evaluating",
|
||||
"Assessing",
|
||||
"Inferring",
|
||||
"Deducing",
|
||||
"Interpreting",
|
||||
"Formulating",
|
||||
"Strategizing",
|
||||
"Orchestrating",
|
||||
"Optimizing",
|
||||
"Calibrating",
|
||||
"Indexing",
|
||||
"Compiling",
|
||||
"Rendering",
|
||||
"Executing",
|
||||
"Initializing",
|
||||
] as const;
|
||||
|
||||
// Get a random thinking message
|
||||
export function getRandomThinkingMessage(): string {
|
||||
const index = Math.floor(Math.random() * THINKING_MESSAGES.length);
|
||||
return THINKING_MESSAGES[index] ?? "Thinking";
|
||||
}
|
||||
Reference in New Issue
Block a user