chore: Merge OSS (#512)

Matthew Zhou
2025-01-06 08:46:53 -10:00
committed by GitHub
parent 61bbc8a2a5
commit 4fd04c63fe
12 changed files with 38 additions and 15 deletions

View File

@@ -1,4 +1,4 @@
-__version__ = "0.6.6"
+__version__ = "0.6.7"
 # import clients
 from letta.client.client import LocalClient, RESTClient, create_client

View File

@@ -224,8 +224,8 @@ class Agent(BaseAgent):
             )
             function_response, updated_agent_state = sandbox_run_result.func_return, sandbox_run_result.agent_state
             assert orig_memory_str == self.agent_state.memory.compile(), "Memory should not be modified in a sandbox tool"
-            self.update_memory_if_change(updated_agent_state.memory)
+            if updated_agent_state is not None:
+                self.update_memory_if_change(updated_agent_state.memory)
         except Exception as e:
             # Need to catch error here, or else truncation won't happen
             # TODO: modify to function execution error
@@ -238,7 +238,7 @@ class Agent(BaseAgent):
     def _get_ai_reply(
         self,
         message_sequence: List[Message],
-        function_call: str = "auto",
+        function_call: Optional[str] = None,
         first_message: bool = False,
         stream: bool = False,  # TODO move to config?
         empty_response_retry_limit: int = 3,
@@ -1029,6 +1029,7 @@ class Agent(BaseAgent):
             num_archival_memory=agent_manager_passage_size,
             num_recall_memory=message_manager_size,
             num_tokens_external_memory_summary=num_tokens_external_memory_summary,
+            external_memory_summary=external_memory_summary,
             # top-level information
             context_window_size_max=self.agent_state.llm_config.context_window,
             context_window_size_current=num_tokens_used_total,
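
The None-guard added in the first hunk of this file matters because sandbox_run_result.agent_state can be None (for example, when the sandboxed tool run fails before producing an updated state); the old code would then raise AttributeError on .memory. A minimal, self-contained sketch of the pattern, using hypothetical stub types:

from dataclasses import dataclass
from typing import Optional

@dataclass
class AgentStateStub:  # hypothetical stand-in for the real AgentState
    memory: str

@dataclass
class SandboxRunResultStub:  # hypothetical stand-in for SandboxRunResult
    func_return: str
    agent_state: Optional[AgentStateStub]  # None when the run produced no new state

def apply_sandbox_result(result: SandboxRunResultStub) -> None:
    # Mirrors the diff: only touch memory when a state actually came back.
    if result.agent_state is not None:
        print(f"updating memory to: {result.agent_state.memory!r}")

apply_sandbox_result(SandboxRunResultStub("ok", AgentStateStub("core memory")))  # updates
apply_sandbox_result(SandboxRunResultStub("err", None))  # safely skipped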

View File

@@ -60,7 +60,6 @@ def list(arg: Annotated[ListChoice, typer.Argument]):
         table.field_names = ["Name", "Text"]
         for human in client.list_humans():
             table.add_row([human.template_name, human.value.replace("\n", "")[:100]])
-        print(table)
     elif arg == ListChoice.personas:
         """List all personas"""
         table.field_names = ["Name", "Text"]

View File

@@ -250,6 +250,8 @@ def unpack_all_inner_thoughts_from_kwargs(
 def unpack_inner_thoughts_from_kwargs(choice: Choice, inner_thoughts_key: str) -> Choice:
     message = choice.message
+    rewritten_choice = choice  # inner thoughts unpacked out of the function
+
     if message.role == "assistant" and message.tool_calls and len(message.tool_calls) >= 1:
         if len(message.tool_calls) > 1:
             warnings.warn(f"Unpacking inner thoughts from more than one tool call ({len(message.tool_calls)}) is not supported")
@@ -271,14 +273,18 @@ def unpack_inner_thoughts_from_kwargs(choice: Choice, inner_thoughts_key: str) -
                     warnings.warn(f"Overwriting existing inner monologue ({new_choice.message.content}) with kwarg ({inner_thoughts})")
                 new_choice.message.content = inner_thoughts
-                return new_choice
+                # update the choice object
+                rewritten_choice = new_choice
             else:
                 warnings.warn(f"Did not find inner thoughts in tool call: {str(tool_call)}")
-            return choice
         except json.JSONDecodeError as e:
             warnings.warn(f"Failed to strip inner thoughts from kwargs: {e}")
             raise e
     else:
         warnings.warn(f"Did not find tool call in message: {str(message)}")
+
+    return rewritten_choice

 def is_context_overflow_error(exception: Union[requests.exceptions.RequestException, Exception]) -> bool:
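
The refactor above replaces two early returns with a single exit point: rewritten_choice starts as the original choice and is only reassigned when inner thoughts are successfully unpacked, so every warn-and-fall-through path still returns a valid Choice. A simplified, runnable sketch of the same single-exit pattern (hypothetical function name, plain JSON strings instead of the OpenAI Choice type):

import json
import warnings

def unpack_inner_thoughts(arguments_json: str, key: str = "inner_thoughts"):
    # Single result variable, as in the diff: default to the unmodified input.
    thoughts = None
    rewritten_args = arguments_json
    try:
        args = json.loads(arguments_json)
        if key in args:
            thoughts = args.pop(key)
            rewritten_args = json.dumps(args)
        else:
            warnings.warn(f"Did not find {key} in arguments")
    except json.JSONDecodeError as e:
        warnings.warn(f"Failed to strip inner thoughts from kwargs: {e}")
        raise
    return thoughts, rewritten_args  # one exit point for all non-raising paths

thoughts, rest = unpack_inner_thoughts('{"inner_thoughts": "hmm", "message": "hi"}')
print(thoughts)  # -> hmm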

View File

@@ -94,7 +94,7 @@ def create(
user_id: Optional[str] = None, # option UUID to associate request with
functions: Optional[list] = None,
functions_python: Optional[dict] = None,
function_call: str = "auto",
function_call: Optional[str] = None, # see: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
# hint
first_message: bool = False,
force_tool_call: Optional[str] = None, # Force a specific tool to be called
@@ -132,10 +132,19 @@ def create(
     # openai
     if llm_config.model_endpoint_type == "openai":
         if model_settings.openai_api_key is None and llm_config.model_endpoint == "https://api.openai.com/v1":
             # only a problem if we are *not* using an openai proxy
             raise LettaConfigurationError(message="OpenAI key is missing from letta config file", missing_fields=["openai_api_key"])
+
+        if function_call is None and functions is not None and len(functions) > 0:
+            # force function calling for reliability, see https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
+            # TODO(matt) move into LLMConfig
+            if llm_config.model_endpoint == "https://inference.memgpt.ai":
+                function_call = "auto"  # TODO change to "required" once proxy supports it
+            else:
+                function_call = "required"
+
         data = build_openai_chat_completions_request(llm_config, messages, user_id, functions, function_call, use_tool_naming, max_tokens)
         if stream:  # Client requested token streaming
             data.stream = True
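
The new block fills in a tool-choice default only when the caller did not pass one: "required" forces the model to call a function (per the OpenAI tool_choice docs linked in the comment), while the inference.memgpt.ai proxy still gets "auto". A standalone sketch of that rule (hypothetical helper name), showing that explicit values are never overridden:

from typing import List, Optional

def resolve_function_call(
    function_call: Optional[str],
    functions: Optional[List[dict]],
    model_endpoint: str,
) -> Optional[str]:
    # Only fill in a default when the caller passed None and functions exist.
    if function_call is None and functions:
        if model_endpoint == "https://inference.memgpt.ai":
            return "auto"  # proxy does not support "required" yet
        return "required"  # force function calling for reliability
    return function_call

assert resolve_function_call(None, [{"name": "send_message"}], "https://api.openai.com/v1") == "required"
assert resolve_function_call("none", [{"name": "send_message"}], "https://api.openai.com/v1") == "none"
assert resolve_function_call(None, [], "https://api.openai.com/v1") is None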

View File

@@ -66,7 +66,7 @@ class LettaResponse(BaseModel):
                 return f'<div class="content"><span class="function-name">{html.escape(msg.function_call.name)}</span>({args})</div>'
             elif msg.message_type == "tool_call_message":
                 args = format_json(msg.tool_call.arguments)
-                return f'<div class="content"><span class="function-name">{html.escape(msg.function_call.name)}</span>({args})</div>'
+                return f'<div class="content"><span class="function-name">{html.escape(msg.tool_call.name)}</span>({args})</div>'
             elif msg.message_type == "function_return":
                 return_value = format_json(msg.function_return)
                 # return f'<div class="status-line">Status: {html.escape(msg.status)}</div><div class="content">{return_value}</div>'
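
The one-line fix above corrects a copy-paste bug: a tool_call_message carries a tool_call attribute, not function_call, so the old branch raised AttributeError as soon as it rendered. A tiny illustration with a hypothetical stand-in object:

from types import SimpleNamespace

msg = SimpleNamespace(
    message_type="tool_call_message",
    tool_call=SimpleNamespace(name="send_message", arguments="{}"),
)
print(msg.tool_call.name)  # correct attribute for this message type
# print(msg.function_call.name)  # AttributeError: no function_call on a tool-call message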

View File

@@ -30,6 +30,9 @@ class ContextWindowOverview(BaseModel):
     num_tokens_external_memory_summary: int = Field(
         ..., description="The number of tokens in the external memory summary (archival + recall metadata)."
     )
+    external_memory_summary: str = Field(
+        ..., description="The metadata summary of the external memory sources (archival + recall metadata)."
+    )

     # context window breakdown (in tokens)
     # this should all add up to context_window_size_current
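
Because the new field uses Field(...) (an Ellipsis default), it is required: producers of ContextWindowOverview must now supply the summary text alongside its token count, which is what the agent.py hunk earlier in this commit does. A minimal sketch of the schema change with hypothetical values:

from pydantic import BaseModel, Field

class ContextWindowOverviewSketch(BaseModel):  # simplified stand-in for the real model
    num_tokens_external_memory_summary: int = Field(..., description="Token count of the summary.")
    external_memory_summary: str = Field(..., description="The external memory metadata summary.")

overview = ContextWindowOverviewSketch(
    num_tokens_external_memory_summary=12,
    external_memory_summary="archival=10 passages, recall=120 messages",  # hypothetical format
)
print(overview.external_memory_summary)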

View File

@@ -388,7 +388,7 @@ class AgentManager:
             curr_memory_str = agent_state.memory.compile()
             if curr_memory_str in curr_system_message_openai["content"] and not force:
                 # NOTE: could this cause issues if a block is removed? (substring match would still work)
-                logger.info(
+                logger.debug(
                     f"Memory hasn't changed for agent id={agent_id} and actor=({actor.id}, {actor.name}), skipping system prompt rebuild"
                 )
                 return agent_state

View File

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "letta"
-version = "0.6.6"
+version = "0.6.7"
 packages = [
     {include = "letta"}
 ]

View File

@@ -117,7 +117,11 @@ def check_first_response_is_valid_for_llm_endpoint(filename: str) -> ChatComplet
     choice = response.choices[0]

     # Ensure that the first message returns a "send_message"
-    validator_func = lambda function_call: function_call.name == "send_message" or function_call.name == "archival_memory_search"
+    validator_func = (
+        lambda function_call: function_call.name == "send_message"
+        or function_call.name == "archival_memory_search"
+        or function_call.name == "core_memory_append"
+    )
     assert_contains_valid_function_call(choice.message, validator_func)

     # Assert that the message has an inner monologue

View File

@@ -38,7 +38,7 @@ def second_secret_word(prev_secret_word: str):
         prev_secret_word (str): The secret word retrieved from calling first_secret_word.
     """
     if prev_secret_word != "v0iq020i0g":
-        raise RuntimeError(f"Expected secret {"v0iq020i0g"}, got {prev_secret_word}")
+        raise RuntimeError(f"Expected secret {'v0iq020i0g'}, got {prev_secret_word}")
     return "4rwp2b4gxq"

@@ -51,7 +51,7 @@ def third_secret_word(prev_secret_word: str):
         prev_secret_word (str): The secret word retrieved from calling second_secret_word.
     """
     if prev_secret_word != "4rwp2b4gxq":
-        raise RuntimeError(f"Expected secret {"4rwp2b4gxq"}, got {prev_secret_word}")
+        raise RuntimeError(f'Expected secret "4rwp2b4gxq", got {prev_secret_word}')
     return "hj2hwibbqm"

@@ -64,7 +64,7 @@ def fourth_secret_word(prev_secret_word: str):
         prev_secret_word (str): The secret word retrieved from calling third_secret_word.
     """
     if prev_secret_word != "hj2hwibbqm":
-        raise RuntimeError(f"Expected secret {"hj2hwibbqm"}, got {prev_secret_word}")
+        raise RuntimeError(f"Expected secret {'hj2hwibbqm'}, got {prev_secret_word}")
     return "banana"

View File

@@ -508,6 +508,7 @@ def test_get_context_window_overview(server: SyncServer, user, agent_id):
     assert overview.num_archival_memory is not None
     assert overview.num_recall_memory is not None
     assert overview.num_tokens_external_memory_summary is not None
+    assert overview.external_memory_summary is not None
     assert overview.num_tokens_system is not None
     assert overview.system_prompt is not None
     assert overview.num_tokens_core_memory is not None