letta-server/memgpt/local_llm/lmstudio/settings.py
Charles Packer 7f950b05e8 Patch local LLMs with context_window (#416)
* patch

* patch ollama

* patch lmstudio

* patch kobold
2023-11-10 12:06:41 -08:00


from ...constants import LLM_MAX_TOKENS

SIMPLE = {
    "stop": [
        "\nUSER:",
        "\nASSISTANT:",
        "\nFUNCTION RETURN:",
        "\nUSER",
        "\nASSISTANT",
        "\nFUNCTION RETURN",
        "\nFUNCTION",
        "\nFUNC",
        "<|im_start|>",
        "<|im_end|>",
        "<|im_sep|>",
        # '\n' +
        # '</s>',
        # '<|',
        # '\n#',
        # '\n\n\n',
    ],
    # This controls the maximum number of tokens that the model can generate
    # Cap this at the model context length (assuming 8k for Mistral 7B)
    # "max_tokens": 8000,
    # "max_tokens": LLM_MAX_TOKENS,
    # This controls how LM Studio handles context overflow
    # In MemGPT we handle this ourselves, so this should be commented out
    # "lmstudio": {"context_overflow_policy": 2},
    "stream": False,
    "model": "local model",
}
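
For reference, here is a minimal sketch of how a settings dict like SIMPLE can be merged into a completion request against LM Studio's local server, assuming its OpenAI-compatible /v1/completions endpoint on the default port 1234. The get_lmstudio_completion helper and the endpoint URL are illustrative assumptions, not part of this file.

import requests

# Assumed default address of LM Studio's OpenAI-compatible local server
LMSTUDIO_COMPLETIONS_URL = "http://localhost:1234/v1/completions"

def get_lmstudio_completion(prompt: str, settings: dict = SIMPLE) -> str:
    """Hypothetical helper: POST a prompt plus the SIMPLE settings, return the text."""
    payload = dict(settings)  # copy so the module-level SIMPLE dict is not mutated
    payload["prompt"] = prompt
    response = requests.post(LMSTUDIO_COMPLETIONS_URL, json=payload, timeout=60)
    response.raise_for_status()
    # OpenAI-compatible servers return {"choices": [{"text": ...}], ...}
    return response.json()["choices"][0]["text"]

Because "stream" is False and the "stop" list is passed through verbatim, the server returns a single non-streaming response and truncates generation at the first stop sequence, which is the shape of output these settings are written for.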