feat: add Python autodocs (#1703)

This commit is contained in:
Sarah Wooders
2024-09-05 11:55:37 -07:00
committed by GitHub
parent f7fdf7cc8f
commit 1c4405087e
21 changed files with 2781 additions and 201 deletions

132
docs/generate_docs.py Normal file
View File

@@ -0,0 +1,132 @@
import os
from pydoc_markdown import PydocMarkdown
from pydoc_markdown.contrib.loaders.python import PythonLoader
from pydoc_markdown.contrib.processors.crossref import CrossrefProcessor
from pydoc_markdown.contrib.processors.filter import FilterProcessor
from pydoc_markdown.contrib.processors.smart import SmartProcessor
from pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer
def generate_config(package):
    """Build a PydocMarkdown configuration that renders `package` as Markdown.

    The renderer is set up for per-object pages: no module header and plain
    (non-descriptive) class titles.
    """
    loader = PythonLoader(packages=[package])
    renderer = MarkdownRenderer(
        render_module_header=False,
        descriptive_class_title=False,
    )
    return PydocMarkdown(
        loaders=[loader],
        processors=[
            FilterProcessor(skip_empty_modules=True),
            CrossrefProcessor(),
            SmartProcessor(),
        ],
        renderer=renderer,
    )
def generate_modules(config):
    """Load all modules from `config` and run its processors over them."""
    loaded = config.load_modules()
    config.process(loaded)
    return loaded
# Output directory for the generated .mdx files.
# NOTE(review): defaults to a personal checkout path; override with DOCS_OUTPUT_DIR.
folder = os.environ.get("DOCS_OUTPUT_DIR", "/Users/sarahwooders/repos/mintlify-docs/python-reference")


def _write_member_docs(cfg, member_names, title):
    """Render the first module containing any of `member_names` to `<folder>/<title>.mdx`.

    The module is restricted to just the matching members before rendering so each
    page documents only the requested object(s). Returns True if a page was written.
    """
    for module in generate_modules(cfg):
        members = [m for m in module.members if m.name in member_names]
        if len(members) > 0:
            print(title)
            module.members = members
            # Context manager ensures the file handle is closed deterministically
            # (the original `open(...).write(...)` leaked handles).
            with open(os.path.join(folder, f"{title}.mdx"), "w") as fh:
                # BUG FIX: render with the same config that loaded the module.
                # The original script rendered schema modules with the *client*
                # config's renderer (`config.renderer` instead of `schema_config`'s).
                fh.write(cfg.renderer.render_to_string([module]))
            return True
    return False


# Client documentation: one page each for LocalClient and RESTClient.
# TODO: append the AbstractClient documentation to both client pages.
config = generate_config("memgpt.client")
for client_name in ["LocalClient", "RESTClient"]:
    _write_member_docs(config, [client_name], client_name)

# Documentation of schemas. A list entry groups several objects that live in the
# same file onto one page, titled after the first name in the group.
schema_config = generate_config("memgpt.schemas")
schema_models = [
    "MemGPTBase",
    "MemGPTConfig",
    "Message",
    "Passage",
    "AgentState",
    "Document",
    "Source",
    "LLMConfig",
    "EmbeddingConfig",
    "MemGPTRequest",
    "MemGPTResponse",
    ["MemGPTMessage", "FunctionCallMessage", "FunctionReturn", "InternalMonologue"],
    "MemGPTUsageStatistics",
    ["Memory", "BasicBlockMemory", "ChatMemory"],
    "Block",
    # ["Job", "JobStatus"],
    "Job",
    "Tool",
    "User",
]
for entry in schema_models:
    names = entry if isinstance(entry, list) else [entry]
    _write_member_docs(schema_config, names, names[0])

# Documentation for data connectors.
connector_config = generate_config("memgpt.data_sources")
for connector_name in ["DataConnector", "DirectoryConnector"]:
    _write_member_docs(connector_config, [connector_name], connector_name)

1351
docs/markdown/index.md Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1 @@
docutils>=0.18
furo
myst-parser
mkdocs
mkdocs-material
pymdown-extensions
pydoc-markdown

File diff suppressed because it is too large Load Diff

View File

@@ -12,11 +12,29 @@ from memgpt.utils import create_uuid_from_string
class DataConnector:
"""
Base class for data connectors that can be extended to generate documents and passages from a custom data source.
"""
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
pass
"""
Generate document text and metadata from a data source.
Returns:
documents (Iterator[Tuple[str, Dict]]): Generate a tuple of string text and metadata dictionary for each document.
"""
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
pass
"""
Generate passage text and metadata from a list of documents.
Args:
documents (List[Document]): List of documents to generate passages from.
chunk_size (int, optional): Chunk size for splitting passages. Defaults to 1024.
Returns:
passages (Iterator[Tuple[str, Dict]]): Generate a tuple of string text and metadata dictionary for each passage.
"""
def load_data(
@@ -50,7 +68,6 @@ def load_data(
# generate passages
for passage_text, passage_metadata in connector.generate_passages([document], chunk_size=embedding_config.embedding_chunk_size):
# for some reason, llama index parsers sometimes return empty strings
if len(passage_text) == 0:
typer.secho(
@@ -108,6 +125,15 @@ def load_data(
class DirectoryConnector(DataConnector):
def __init__(self, input_files: List[str] = None, input_directory: str = None, recursive: bool = False, extensions: List[str] = None):
"""
Connector for reading text data from a directory of files.
Args:
input_files (List[str], optional): List of file paths to read. Defaults to None.
input_directory (str, optional): Directory to read files from. Defaults to None.
recursive (bool, optional): Whether to read files recursively from the input directory. Defaults to False.
extensions (List[str], optional): List of file extensions to read. Defaults to None.
"""
self.connector_type = "directory"
self.input_files = input_files
self.input_directory = input_directory

View File

@@ -20,7 +20,21 @@ class BaseAgent(MemGPTBase, validate_assignment=True):
class AgentState(BaseAgent):
"""Representation of an agent's state."""
"""
Representation of an agent's state. This is the state of the agent at a given time, and is persisted in the DB backend. The state has all the information needed to recreate a persisted agent.
Parameters:
id (str): The unique identifier of the agent.
name (str): The name of the agent (must be unique to the user).
created_at (datetime): The datetime the agent was created.
message_ids (List[str]): The ids of the messages in the agent's in-context memory.
memory (Memory): The in-context memory of the agent.
tools (List[str]): The tools used by the agent. This includes any memory editing functions specified in `memory`.
system (str): The system prompt used by the agent.
llm_config (LLMConfig): The LLM configuration used by the agent.
embedding_config (EmbeddingConfig): The embedding configuration used by the agent.
"""
id: str = BaseAgent.generate_id_field()
name: str = Field(..., description="The name of the agent.")

View File

@@ -59,7 +59,19 @@ class BaseBlock(MemGPTBase, validate_assignment=True):
class Block(BaseBlock):
"""Block of the LLM context"""
"""
A Block represents a reserved section of the LLM's context window which is editable. `Block` objects contained in the `Memory` object, which is able to edit the Block values.
Parameters:
name (str): The name of the block.
value (str): The value of the block. This is the string that is represented in the context window.
limit (int): The character limit of the block.
template (bool): Whether the block is a template (e.g. saved human/persona options). Non-template blocks are not stored in the database and are ephemeral, while templated blocks are stored in the database.
label (str): The label of the block (e.g. 'human', 'persona'). This defines a category for the block.
description (str): Description of the block.
metadata_ (Dict): Metadata of the block.
user_id (str): The unique identifier of the user associated with the block.
"""
id: str = BaseBlock.generate_id_field()
value: str = Field(..., description="Value of the block.")

View File

@@ -4,7 +4,21 @@ from pydantic import BaseModel, Field
class EmbeddingConfig(BaseModel):
"""Embedding model configuration"""
"""
Embedding model configuration. This object specifies all the information necessary to access an embedding model for use with MemGPT, except for secret keys.
Attributes:
embedding_endpoint_type (str): The endpoint type for the model.
embedding_endpoint (str): The endpoint for the model.
embedding_model (str): The model for the embedding.
embedding_dim (int): The dimension of the embedding.
embedding_chunk_size (int): The chunk size of the embedding.
azure_endpoint (:obj:`str`, optional): The Azure endpoint for the model (Azure only).
azure_version (str): The Azure version for the model (Azure only).
azure_deployment (str): The Azure deployment for the model (Azure only).
"""
embedding_endpoint_type: str = Field(..., description="The endpoint type for the model.")
embedding_endpoint: Optional[str] = Field(None, description="The endpoint for the model (`None` if local).")

View File

@@ -18,6 +18,10 @@ class OptionState(str, Enum):
class JobStatus(str, Enum):
"""
Status of the job.
"""
created = "created"
running = "running"
completed = "completed"

View File

@@ -14,7 +14,17 @@ class JobBase(MemGPTBase):
class Job(JobBase):
"""Representation of offline jobs."""
"""
Representation of offline jobs, used for tracking status of data loading tasks (involving parsing and embedding documents).
Parameters:
id (str): The unique identifier of the job.
status (JobStatus): The status of the job.
created_at (datetime): The unix timestamp of when the job was created.
completed_at (datetime): The unix timestamp of when the job was completed.
user_id (str): The unique identifier of the user associated with the job.
"""
id: str = JobBase.generate_id_field()
status: JobStatus = Field(default=JobStatus.created, description="The status of the job.")

View File

@@ -4,6 +4,17 @@ from pydantic import BaseModel, ConfigDict, Field
class LLMConfig(BaseModel):
"""
Configuration for a Language Model (LLM) model. This object specifies all the information necessary to access an LLM model for use with MemGPT, except for secret keys.
Attributes:
model (str): The name of the LLM model.
model_endpoint_type (str): The endpoint type for the model.
model_endpoint (str): The endpoint for the model.
model_wrapper (str): The wrapper for the model.
context_window (int): The context window size for the model.
"""
# TODO: 🤮 don't default to a vendor! bug city!
model: str = Field(..., description="LLM model name. ")
model_endpoint_type: str = Field(..., description="The endpoint type for the model.")

View File

@@ -7,7 +7,16 @@ from pydantic import BaseModel, field_serializer, field_validator
# MemGPT API style responses (intended to be easier to use vs getting true Message types)
class BaseMemGPTMessage(BaseModel):
class MemGPTMessage(BaseModel):
"""
Base class for simplified MemGPT message response type. This is intended to be used for developers who want the internal monologue, function calls, and function returns in a simplified format that does not include additional information other than the content and timestamp.
Attributes:
id (str): The ID of the message
date (datetime): The date the message was created in ISO format
"""
id: str
date: datetime
@@ -20,13 +29,14 @@ class BaseMemGPTMessage(BaseModel):
return dt.isoformat(timespec="seconds")
class InternalMonologue(BaseMemGPTMessage):
class InternalMonologue(MemGPTMessage):
"""
{
"internal_monologue": msg,
"date": msg_obj.created_at.isoformat() if msg_obj is not None else get_utc_time().isoformat(),
"id": str(msg_obj.id) if msg_obj is not None else None,
}
Representation of an agent's internal monologue.
Attributes:
internal_monologue (str): The internal monologue of the agent
id (str): The ID of the message
date (datetime): The date the message was created in ISO format
"""
internal_monologue: str
@@ -51,16 +61,14 @@ class FunctionCallDelta(BaseModel):
return json.dumps(self.model_dump(exclude_none=True), *args, **kwargs)
class FunctionCallMessage(BaseMemGPTMessage):
class FunctionCallMessage(MemGPTMessage):
"""
{
"function_call": {
"name": function_call.function.name,
"arguments": function_call.function.arguments,
},
"id": str(msg_obj.id),
"date": msg_obj.created_at.isoformat(),
}
A message representing a request to call a function (generated by the LLM to trigger function execution).
Attributes:
function_call (Union[FunctionCall, FunctionCallDelta]): The function call
id (str): The ID of the message
date (datetime): The date the message was created in ISO format
"""
function_call: Union[FunctionCall, FunctionCallDelta]
@@ -95,31 +103,32 @@ class FunctionCallMessage(BaseMemGPTMessage):
return v
class FunctionReturn(BaseMemGPTMessage):
class FunctionReturn(MemGPTMessage):
"""
{
"function_return": msg,
"status": "success" or "error",
"id": str(msg_obj.id),
"date": msg_obj.created_at.isoformat(),
}
A message representing the return value of a function call (generated by MemGPT executing the requested function).
Attributes:
function_return (str): The return value of the function
status (Literal["success", "error"]): The status of the function call
id (str): The ID of the message
date (datetime): The date the message was created in ISO format
"""
function_return: str
status: Literal["success", "error"]
MemGPTMessage = Union[InternalMonologue, FunctionCallMessage, FunctionReturn]
# MemGPTMessage = Union[InternalMonologue, FunctionCallMessage, FunctionReturn]
# Legacy MemGPT API had an additional type "assistant_message" and the "function_call" was a formatted string
class AssistantMessage(BaseMemGPTMessage):
class AssistantMessage(MemGPTMessage):
assistant_message: str
class LegacyFunctionCallMessage(BaseMemGPTMessage):
class LegacyFunctionCallMessage(MemGPTMessage):
function_call: str

View File

@@ -3,7 +3,7 @@ from typing import List, Union
from pydantic import BaseModel, Field
from memgpt.schemas.enums import MessageStreamStatus
from memgpt.schemas.memgpt_message import LegacyMemGPTMessage, MemGPTMessage
from memgpt.schemas.memgpt_message import MemGPTMessage
from memgpt.schemas.message import Message
from memgpt.schemas.usage import MemGPTUsageStatistics
@@ -11,10 +11,16 @@ from memgpt.schemas.usage import MemGPTUsageStatistics
class MemGPTResponse(BaseModel):
# messages: List[Message] = Field(..., description="The messages returned by the agent.")
messages: Union[List[Message], List[MemGPTMessage], List[LegacyMemGPTMessage]] = Field(
..., description="The messages returned by the agent."
)
"""
Response object from an agent interaction, consisting of the new messages generated by the agent and usage statistics.
The type of the returned messages can be either `Message` or `MemGPTMessage`, depending on what was specified in the request.
Attributes:
messages (List[Union[Message, MemGPTMessage]]): The messages returned by the agent.
usage (MemGPTUsageStatistics): The usage statistics
"""
messages: Union[List[Message], List[MemGPTMessage]] = Field(..., description="The messages returned by the agent.")
usage: MemGPTUsageStatistics = Field(..., description="The usage statistics of the agent.")

View File

@@ -11,7 +11,14 @@ from memgpt.schemas.block import Block
class Memory(BaseModel, validate_assignment=True):
"""Represents the in-context memory of the agent"""
"""
Represents the in-context memory of the agent. This includes both the `Block` objects (labelled by sections), as well as tools to edit the blocks.
Attributes:
memory (Dict[str, Block]): Mapping from memory block section to memory block.
"""
# Memory.memory is a dict mapping from memory block section to memory block.
memory: Dict[str, Block] = Field(default_factory=dict, description="Mapping from memory block section to memory block.")
@@ -114,7 +121,30 @@ class Memory(BaseModel, validate_assignment=True):
# TODO: ideally this is refactored into ChatMemory and the subclasses are given more specific names.
class BaseChatMemory(Memory):
class BasicBlockMemory(Memory):
"""
BasicBlockMemory is a basic implementation of the Memory class, which takes in a list of blocks and links them to the memory object. These are editable by the agent via the core memory functions.
Attributes:
memory (Dict[str, Block]): Mapping from memory block section to memory block.
Methods:
core_memory_append: Append to the contents of core memory.
core_memory_replace: Replace the contents of core memory.
"""
def __init__(self, blocks: List[Block] = []):
"""
Initialize the BasicBlockMemory object with a list of pre-defined blocks.
Args:
blocks (List[Block]): List of blocks to be linked to the memory object.
"""
super().__init__()
for block in blocks:
# TODO: centralize these internal schema validations
assert block.name is not None and block.name != "", "each existing chat block must have a name"
self.link_block(name=block.name, block=block)
def core_memory_append(self: "Agent", name: str, content: str) -> Optional[str]: # type: ignore
"""
@@ -150,30 +180,25 @@ class BaseChatMemory(Memory):
return None
class ChatMemory(BaseChatMemory):
class ChatMemory(BasicBlockMemory):
"""
ChatMemory initializes a BaseChatMemory with two default blocks
ChatMemory initializes a BasicBlockMemory with two default blocks, `human` and `persona`.
"""
def __init__(self, persona: str, human: str, limit: int = 2000):
"""
Initialize the ChatMemory object with a persona and human string.
Args:
persona (str): The starter value for the persona block.
human (str): The starter value for the human block.
limit (int): The character limit for each block.
"""
super().__init__()
self.link_block(name="persona", block=Block(name="persona", value=persona, limit=limit, label="persona"))
self.link_block(name="human", block=Block(name="human", value=human, limit=limit, label="human"))
class BlockChatMemory(BaseChatMemory):
"""
BlockChatMemory is a subclass of BaseChatMemory which uses shared memory blocks specified at initialization-time.
"""
def __init__(self, blocks: List[Block] = []):
super().__init__()
for block in blocks:
# TODO: centralize these internal schema validations
assert block.name is not None and block.name != "", "each existing chat block must have a name"
self.link_block(name=block.name, block=block)
class UpdateMemory(BaseModel):
"""Update the memory of the agent"""

View File

@@ -51,12 +51,20 @@ class MessageCreate(BaseMessage):
class Message(BaseMessage):
"""
Representation of a message sent.
MemGPT's internal representation of a message. Includes methods to convert to/from LLM provider formats.
Attributes:
id (str): The unique identifier of the message.
role (MessageRole): The role of the participant.
text (str): The text of the message.
user_id (str): The unique identifier of the user.
agent_id (str): The unique identifier of the agent.
model (str): The model used to make the function call.
name (str): The name of the participant.
created_at (datetime): The time the message was created.
tool_calls (List[ToolCall]): The list of tool calls requested.
tool_call_id (str): The id of the tool call.
Messages can be:
- agent->user (role=='agent')
- user->agent and system->agent (role=='user')
- or function/tool call returns (role=='function'/'tool').
"""
id: str = BaseMessage.generate_id_field()
@@ -89,10 +97,9 @@ class Message(BaseMessage):
return json_message
def to_memgpt_message(self) -> Union[List[MemGPTMessage], List[LegacyMemGPTMessage]]:
"""Convert message object (in DB format) to the style used by the original MemGPT API
"""Convert message object (in DB format) to the style used by the original MemGPT API"""
NOTE: this may split the message into two pieces (e.g. if the assistant has inner thoughts + function call)
"""
# NOTE: this may split the message into two pieces (e.g. if the assistant has inner thoughts + function call)
raise NotImplementedError
@staticmethod
@@ -329,7 +336,12 @@ class Message(BaseMessage):
return openai_message
def to_anthropic_dict(self, inner_thoughts_xml_tag="thinking") -> dict:
# raise NotImplementedError
"""
Convert to an Anthropic message dictionary
Args:
inner_thoughts_xml_tag (str): The XML tag to wrap around inner thoughts
"""
def add_xml_tag(string: str, xml_tag: Optional[str]):
# NOTE: Anthropic docs recommends using <thinking> tag when using CoT + tool use
@@ -401,12 +413,13 @@ class Message(BaseMessage):
return anthropic_message
def to_google_ai_dict(self, put_inner_thoughts_in_kwargs: bool = True) -> dict:
"""Go from Message class to Google AI REST message object
type Content: https://ai.google.dev/api/rest/v1/Content / https://ai.google.dev/api/rest/v1beta/Content
parts[]: Part
role: str ('user' or 'model')
"""
Go from Message class to Google AI REST message object
"""
# type Content: https://ai.google.dev/api/rest/v1/Content / https://ai.google.dev/api/rest/v1beta/Content
# parts[]: Part
# role: str ('user' or 'model')
if self.role != "tool" and self.name is not None:
raise UserWarning(f"Using Google AI with non-null 'name' field ({self.name}) not yet supported.")
@@ -513,20 +526,21 @@ class Message(BaseMessage):
function_response_prefix: Optional[str] = "[CHATBOT function returned]",
inner_thoughts_as_kwarg: Optional[bool] = False,
) -> List[dict]:
"""Cohere chat_history dicts only have 'role' and 'message' fields
NOTE: returns a list of dicts so that we can convert:
assistant [cot]: "I'll send a message"
assistant [func]: send_message("hi")
tool: {'status': 'OK'}
to:
CHATBOT.text: "I'll send a message"
SYSTEM.text: [CHATBOT called function] send_message("hi")
SYSTEM.text: [CHATBOT function returned] {'status': 'OK'}
TODO: update this prompt style once guidance from Cohere on
embedded function calls in multi-turn conversation become more clear
"""
Cohere chat_history dicts only have 'role' and 'message' fields
"""
# NOTE: returns a list of dicts so that we can convert:
# assistant [cot]: "I'll send a message"
# assistant [func]: send_message("hi")
# tool: {'status': 'OK'}
# to:
# CHATBOT.text: "I'll send a message"
# SYSTEM.text: [CHATBOT called function] send_message("hi")
# SYSTEM.text: [CHATBOT function returned] {'status': 'OK'}
# TODO: update this prompt style once guidance from Cohere on
# embedded function calls in multi-turn conversation become more clear
if self.role == "system":
"""

View File

@@ -25,6 +25,20 @@ class PassageBase(MemGPTBase):
class Passage(PassageBase):
"""
Representation of a passage, which is stored in archival memory.
Parameters:
text (str): The text of the passage.
embedding (List[float]): The embedding of the passage.
embedding_config (EmbeddingConfig): The embedding configuration used by the passage.
created_at (datetime): The creation date of the passage.
user_id (str): The unique identifier of the user associated with the passage.
agent_id (str): The unique identifier of the agent associated with the passage.
source_id (str): The data source of the passage.
doc_id (str): The unique identifier of the document associated with the passage.
"""
id: str = PassageBase.generate_id_field()
# passage text
@@ -39,7 +53,7 @@ class Passage(PassageBase):
@field_validator("embedding")
@classmethod
def pad_embeddings(cls, embedding: List[float]) -> List[float]:
"""Pad embeddings to MAX_EMBEDDING_SIZE. This is necessary to ensure all stored embeddings are the same size."""
"""Pad embeddings to `MAX_EMBEDDING_SIZE`. This is necessary to ensure all stored embeddings are the same size."""
import numpy as np
if embedding and len(embedding) != MAX_EMBEDDING_DIM:

View File

@@ -27,6 +27,19 @@ class SourceCreate(BaseSource):
class Source(BaseSource):
"""
Representation of a source, which is a collection of documents and passages.
Parameters:
id (str): The ID of the source
name (str): The name of the source.
embedding_config (EmbeddingConfig): The embedding configuration used by the source.
created_at (datetime): The creation date of the source.
user_id (str): The ID of the user that created the source.
metadata_ (dict): Metadata associated with the source.
description (str): The description of the source.
"""
id: str = BaseSource.generate_id_field()
name: str = Field(..., description="The name of the source.")
embedding_config: EmbeddingConfig = Field(..., description="The embedding configuration used by the source.")

View File

@@ -23,6 +23,17 @@ class BaseTool(MemGPTBase):
class Tool(BaseTool):
"""
Representation of a tool, which is a function that can be called by the agent.
Parameters:
id (str): The unique identifier of the tool.
name (str): The name of the function.
tags (List[str]): Metadata tags.
source_code (str): The source code of the function.
json_schema (Dict): The JSON schema of the function.
"""
id: str = BaseTool.generate_id_field()
@@ -34,7 +45,9 @@ class Tool(BaseTool):
json_schema: Dict = Field(default_factory=dict, description="The JSON schema of the function.")
def to_dict(self):
"""Convert into OpenAI representation"""
"""
Convert tool into OpenAI representation.
"""
return vars(
ToolCall(
tool_id=self.id,
@@ -52,7 +65,7 @@ class Tool(BaseTool):
crewai_tool (CrewAIBaseTool): An instance of a crewAI BaseTool (BaseTool from crewai)
Returns:
Tool: A memGPT Tool initialized with attributes derived from the provided crewAI BaseTool object.
Tool: A MemGPT Tool initialized with attributes derived from the provided crewAI BaseTool object.
"""
crewai_tool.name
description = crewai_tool.description

View File

@@ -2,6 +2,16 @@ from pydantic import BaseModel, Field
class MemGPTUsageStatistics(BaseModel):
"""
Usage statistics for the agent interaction.
Attributes:
completion_tokens (int): The number of tokens generated by the agent.
prompt_tokens (int): The number of tokens in the prompt.
total_tokens (int): The total number of tokens processed by the agent.
step_count (int): The number of steps taken by the agent.
"""
completion_tokens: int = Field(0, description="The number of tokens generated by the agent.")
prompt_tokens: int = Field(0, description="The number of tokens in the prompt.")
total_tokens: int = Field(0, description="The total number of tokens processed by the agent.")

View File

@@ -11,6 +11,15 @@ class UserBase(MemGPTBase):
class User(UserBase):
"""
Representation of a user.
Parameters:
id (str): The unique identifier of the user.
name (str): The name of the user.
created_at (datetime): The creation date of the user.
"""
id: str = UserBase.generate_id_field()
name: str = Field(..., description="The name of the user.")
created_at: datetime = Field(default_factory=datetime.utcnow, description="The creation date of the user.")

View File

@@ -5,7 +5,7 @@ import pytest
from memgpt import create_client
from memgpt.client.client import LocalClient, RESTClient
from memgpt.schemas.block import Block
from memgpt.schemas.memory import BlockChatMemory, ChatMemory, Memory
from memgpt.schemas.memory import BasicBlockMemory, ChatMemory, Memory
@pytest.fixture(scope="module")
@@ -23,7 +23,6 @@ def agent(client):
def test_agent(client: Union[LocalClient, RESTClient]):
tools = client.list_tools()
# create agent
@@ -129,7 +128,7 @@ def test_agent_with_shared_blocks(client):
try:
first_agent_state_test = client.create_agent(
name="first_test_agent_shared_memory_blocks",
memory=BlockChatMemory(blocks=existing_non_template_blocks),
memory=BasicBlockMemory(blocks=existing_non_template_blocks),
description="This is a test agent using shared memory blocks",
)
assert isinstance(first_agent_state_test.memory, Memory)
@@ -143,7 +142,7 @@ def test_agent_with_shared_blocks(client):
# have this latest value set by the other agent.
second_agent_state_test = client.create_agent(
name="second_test_agent_shared_memory_blocks",
memory=BlockChatMemory(blocks=existing_non_template_blocks_no_values),
memory=BasicBlockMemory(blocks=existing_non_template_blocks_no_values),
description="This is a test agent using shared memory blocks",
)
@@ -161,7 +160,6 @@ def test_agent_with_shared_blocks(client):
def test_memory(client, agent):
# get agent memory
original_memory = client.get_in_context_memory(agent.id)
assert original_memory is not None
@@ -214,7 +212,6 @@ def test_recall_memory(client, agent):
def test_tools(client):
def print_tool(message: str):
"""
A tool to print a message
@@ -303,7 +300,6 @@ def test_tools_from_crewai(client):
def test_sources(client, agent):
# list sources (empty)
sources = client.list_sources()
assert len(sources) == 0