letta-server/memgpt/presets/presets.py
Sarah Wooders ec2bda4966 Refactor config + determine LLM via config.model_endpoint_type (#422)
* mark deprecated API section

* CLI bug fixes for azure

* check azure before running

* Update README.md

* Update README.md

* bug fix with persona loading

* remove print

* make errors for cli flags more clear

* format

* fix imports

* fix imports

* add prints

* update lock

* update config fields

* cleanup config loading

* commit

* remove asserts

* refactor configure

* put into different functions

* add embedding default

* pass in config

* fixes

* allow overriding openai embedding endpoint

* black

* trying to patch tests (some circular import errors)

* update flags and docs

* patched support for local llms using endpoint and endpoint type passed via configs, not env vars

* missing files

* fix naming

* fix import

* fix two runtime errors

* patch ollama typo, move ollama model question pre-wrapper, modify question phrasing to include link to readthedocs, also have a default ollama model that has a tag included

* disable debug messages

* made error message for failed load more informative

* don't print dynamic linking function warning unless --debug

* updated tests to work with new cli workflow (disabled openai config test for now)

* added skips for tests when vars are missing

* update bad arg

* revise test to soft pass on empty string too

* don't run configure twice

* extend timeout (try to pass against nltk download)

* update defaults

* typo with endpoint type default

* patch runtime errors for when model is None

* catching another case of 'x in model' when model is None (preemptively)

* allow overrides to local llm related config params

* made model wrapper selection from a list vs raw input

* update test for select instead of input

* Fixed bug in endpoint when using local->openai selection, also added validation loop to manual endpoint entry

* updated error messages to be more informative with links to readthedocs

* add back gpt3.5-turbo

---------

Co-authored-by: cpacker <packercharles@gmail.com>
2023-11-14 15:58:19 -08:00
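
The commit title above refers to picking the LLM backend from config.model_endpoint_type rather than from environment variables (the "patched support for local llms using endpoint and endpoint type passed via configs" bullet). Below is a minimal sketch of that idea; apart from the model_endpoint_type name taken from the title, the class, fields, and backend strings are assumptions for illustration, not MemGPT's actual config API.

# Hypothetical illustration of config-driven backend selection; only the field
# name model_endpoint_type comes from the commit title, everything else here
# (class name, other fields, backend strings) is an assumption.
from dataclasses import dataclass


@dataclass
class ExampleConfig:
    model_endpoint_type: str = "openai"  # e.g. "openai", "azure", "ollama", "webui"
    model_endpoint: str = "https://api.openai.com/v1"


def describe_backend(config: ExampleConfig) -> str:
    """Pick an LLM backend from the config instead of environment variables."""
    if config.model_endpoint_type == "openai":
        return f"OpenAI-compatible API at {config.model_endpoint}"
    if config.model_endpoint_type == "azure":
        return "Azure OpenAI deployment"
    # everything else is treated as a local LLM backend (ollama, webui, ...)
    return f"local backend '{config.model_endpoint_type}' at {config.model_endpoint}"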

62 lines
2.6 KiB
Python

from .utils import load_all_presets, is_valid_yaml_format
from ..prompts import gpt_functions
from ..prompts import gpt_system
from ..functions.functions import load_all_function_sets

DEFAULT_PRESET = "memgpt_chat"
available_presets = load_all_presets()
preset_options = list(available_presets.keys())


def use_preset(preset_name, agent_config, model, persona, human, interface, persistence_manager):
    """Storing combinations of SYSTEM + FUNCTION prompts"""
    from memgpt.agent import Agent
    from memgpt.utils import printd

    # Available functions is a mapping from:
    # function_name -> {
    #     json_schema: schema
    #     python_function: function
    # }
    available_functions = load_all_function_sets()
    available_presets = load_all_presets()
    if preset_name not in available_presets:
        raise ValueError(f"Preset '{preset_name}.yaml' not found")
    preset = available_presets[preset_name]
    if not is_valid_yaml_format(preset, list(available_functions.keys())):
        raise ValueError(f"Preset '{preset_name}.yaml' is not valid")

    preset_system_prompt = preset["system_prompt"]
    preset_function_set_names = preset["functions"]

    # Filter down the function set based on what the preset requested
    preset_function_set = {}
    for f_name in preset_function_set_names:
        if f_name not in available_functions:
            raise ValueError(f"Function '{f_name}' was specified in preset, but is not in function library:\n{available_functions.keys()}")
        preset_function_set[f_name] = available_functions[f_name]
    assert len(preset_function_set_names) == len(preset_function_set)
    printd(f"Available functions:\n", list(preset_function_set.keys()))
    # preset_function_set = {f_name: f_dict for f_name, f_dict in available_functions.items() if f_name in preset_function_set_names}
    # printd(f"Available functions:\n", [f_name for f_name, f_dict in preset_function_set.items()])
    # Make sure that every function the preset wanted is inside the available functions
    # assert len(preset_function_set_names) == len(preset_function_set)

    return Agent(
        config=agent_config,
        model=model,
        system=gpt_system.get_system_text(preset_system_prompt),
        functions=preset_function_set,
        interface=interface,
        persistence_manager=persistence_manager,
        persona_notes=persona,
        human_notes=human,
        # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
        first_message_verify_mono=True if (model is not None and "gpt-4" in model) else False,
    )