refactor: simplify docstrings for EmbeddingConfig and LLMConfig classes (#3733)
Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -6,21 +6,7 @@ from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
|
||||
|
||||
|
||||
class EmbeddingConfig(BaseModel):
|
||||
"""
|
||||
|
||||
Embedding model configuration. This object specifies all the information necessary to access an embedding model for use with Letta, except for secret keys.
|
||||
|
||||
Attributes:
|
||||
embedding_endpoint_type (str): The endpoint type for the model.
|
||||
embedding_endpoint (str): The endpoint for the model.
|
||||
embedding_model (str): The model for the embedding.
|
||||
embedding_dim (int): The dimension of the embedding.
|
||||
embedding_chunk_size (int): The chunk size of the embedding.
|
||||
azure_endpoint (str, optional): The Azure endpoint for the model (Azure only).
|
||||
azure_version (str): The Azure version for the model (Azure only).
|
||||
azure_deployment (str): The Azure deployment for the model (Azure only).
|
||||
|
||||
"""
|
||||
"""Configuration for embedding model connection and processing parameters."""
|
||||
|
||||
embedding_endpoint_type: Literal[
|
||||
"openai",
|
||||
|
||||
@@ -10,19 +10,7 @@ logger = get_logger(__name__)
|
||||
|
||||
|
||||
class LLMConfig(BaseModel):
|
||||
"""
|
||||
Configuration for a Language Model (LLM). This object specifies all the information necessary to access an LLM model for use with Letta, except for secret keys.
|
||||
|
||||
Attributes:
|
||||
model (str): The name of the LLM model.
|
||||
model_endpoint_type (str): The endpoint type for the model.
|
||||
model_endpoint (str): The endpoint for the model.
|
||||
model_wrapper (str): The wrapper for the model. This is used to wrap additional text around the input/output of the model. This is useful for text-to-text completions, such as the Completions API in OpenAI.
|
||||
context_window (int): The context window size for the model.
|
||||
put_inner_thoughts_in_kwargs (bool): Puts `inner_thoughts` as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.
|
||||
temperature (float): The temperature to use when generating text with the model. A higher temperature will result in more random text.
|
||||
max_tokens (int): The maximum number of tokens to generate.
|
||||
"""
|
||||
"""Configuration for Language Model (LLM) connection and generation parameters."""
|
||||
|
||||
model: str = Field(..., description="LLM model name. ")
|
||||
model_endpoint_type: Literal[
|
||||
|
||||
Reference in New Issue
Block a user