letta-server/memgpt/functions/function_sets/extras.py
Sarah Wooders ec2bda4966 Refactor config + determine LLM via config.model_endpoint_type (#422)
Co-authored-by: cpacker <packercharles@gmail.com>
2023-11-14 15:58:19 -08:00


from typing import Optional
import os
import json
import requests
from memgpt.constants import MESSAGE_CHATGPT_FUNCTION_MODEL, MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE, MAX_PAUSE_HEARTBEATS
from memgpt.openai_tools import completions_with_backoff as create


def message_chatgpt(self, message: str):
    """
    Send a message to a more basic AI, ChatGPT. A useful resource for asking questions. ChatGPT does not retain memory of previous interactions.

    Args:
        message (str): Message to send ChatGPT. Phrase your message as a full English sentence.

    Returns:
        str: Reply message from ChatGPT
    """
    message_sequence = [
        {"role": "system", "content": MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE},
        {"role": "user", "content": str(message)},
    ]
    response = create(
        model=MESSAGE_CHATGPT_FUNCTION_MODEL,
        messages=message_sequence,
        # functions=functions,
        # function_call=function_call,
    )
    reply = response.choices[0].message.content
    return reply
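
# A minimal usage sketch (hypothetical): `self` is the calling MemGPT agent, but it
# is unused by this helper, so any object (or None) can stand in for it. Requires a
# configured OpenAI-compatible endpoint:
#
#     reply = message_chatgpt(None, "What year was the transistor invented?")
#     print(reply)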


def read_from_text_file(self, filename: str, line_start: int, num_lines: Optional[int] = 1):
    """
    Read lines from a text file.

    Args:
        filename (str): The name of the file to read.
        line_start (int): Line to start reading from.
        num_lines (Optional[int]): How many lines to read (defaults to 1).

    Returns:
        str: Text read from the file
    """
    max_chars = 500
    trunc_message = True

    if not os.path.exists(filename):
        raise FileNotFoundError(f"The file '{filename}' does not exist.")

    if num_lines is None:
        # The annotation allows an explicit None; fall back to the default of 1
        num_lines = 1

    if line_start < 1 or num_lines < 1:
        raise ValueError("Both line_start and num_lines must be positive integers.")

    lines = []
    chars_read = 0
    with open(filename, "r") as file:
        for current_line_number, line in enumerate(file, start=1):
            if line_start <= current_line_number < line_start + num_lines:
                chars_to_add = len(line)
                if max_chars is not None and chars_read + chars_to_add > max_chars:
                    # If adding this line exceeds max_chars, truncate the line and stop reading further
                    excess_chars = (chars_read + chars_to_add) - max_chars
                    lines.append(line[:-excess_chars].rstrip("\n"))
                    if trunc_message:
                        lines.append(f"[SYSTEM ALERT - max chars ({max_chars}) reached during file read]")
                    break
                else:
                    lines.append(line.rstrip("\n"))
                    chars_read += chars_to_add

            if current_line_number >= line_start + num_lines - 1:
                break

    return "\n".join(lines)


def append_to_text_file(self, filename: str, content: str):
    """
    Append to a text file.

    Args:
        filename (str): The name of the file to append to.
        content (str): Content to append to the file.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    if not os.path.exists(filename):
        raise FileNotFoundError(f"The file '{filename}' does not exist.")

    with open(filename, "a") as file:
        file.write(content + "\n")
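
# Example sketch: the target file must already exist, since this helper refuses
# to create new files:
#
#     append_to_text_file(None, "notes.txt", "Remember to water the plants.")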


def http_request(self, method: str, url: str, payload_json: Optional[str] = None):
    """
    Generates an HTTP request and returns the response.

    Args:
        method (str): The HTTP method (e.g., 'GET', 'POST').
        url (str): The URL for the request.
        payload_json (Optional[str]): A JSON string representing the request payload.

    Returns:
        dict: The response from the HTTP request.
    """
    try:
        headers = {"Content-Type": "application/json"}

        # For GET requests, ignore the payload
        if method.upper() == "GET":
            print(f"[HTTP] launching GET request to {url}")
            response = requests.get(url, headers=headers)
        else:
            # Validate and convert the payload for other types of requests
            if payload_json:
                payload = json.loads(payload_json)
            else:
                payload = {}
            print(f"[HTTP] launching {method} request to {url}, payload=\n{json.dumps(payload, indent=2)}")
            response = requests.request(method, url, json=payload, headers=headers)

        return {"status_code": response.status_code, "headers": dict(response.headers), "body": response.text}
    except Exception as e:
        return {"error": str(e)}