fix: pass in a dummy key to openai python client if it doesn't exist at inference time

This commit is contained in:
cpacker
2025-01-29 17:55:54 -08:00
parent 619be87a47
commit f3c512f4a1
3 changed files with 8 additions and 5 deletions

View File

@@ -142,6 +142,11 @@ def create(
if model_settings.openai_api_key is None and llm_config.model_endpoint == "https://api.openai.com/v1":
# this is only a problem if we are *not* using an OpenAI proxy
raise LettaConfigurationError(message="OpenAI key is missing from letta config file", missing_fields=["openai_api_key"])
elif model_settings.openai_api_key is None:
# the openai python client requires a dummy API key
api_key = "DUMMY_API_KEY"
else:
api_key = model_settings.openai_api_key
if function_call is None and functions is not None and len(functions) > 0:
# force function calling for reliability, see https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
@@ -160,7 +165,7 @@ def create(
response = run_async_task(
openai_chat_completions_process_stream(
url=llm_config.model_endpoint,
api_key=model_settings.openai_api_key,
api_key=api_key,
chat_completion_request=data,
stream_interface=stream_interface,
)
@@ -173,7 +178,7 @@ def create(
response = run_async_task(
openai_chat_completions_request(
url=llm_config.model_endpoint,
api_key=model_settings.openai_api_key,
api_key=api_key,
chat_completion_request=data,
)
)

View File

@@ -411,6 +411,7 @@ async def openai_chat_completions_request(
https://platform.openai.com/docs/guides/text-generation?lang=curl
"""
print(f"\n\n\n\napi_key is {api_key}")
data = prepare_openai_payload(chat_completion_request)
client = AsyncOpenAI(api_key=api_key, base_url=url)
chat_completion = await client.chat.completions.create(**data)

View File

@@ -404,9 +404,6 @@ class SyncServer(Server):
if model_settings.lmstudio_base_url.endswith("/v1")
else model_settings.lmstudio_base_url + "/v1"
)
# Set the OpenAI API key to something non-empty
if model_settings.openai_api_key is None:
model_settings.openai_api_key = "DUMMY"
self._enabled_providers.append(LMStudioOpenAIProvider(base_url=lmstudio_url))
def load_agent(self, agent_id: str, actor: User, interface: Union[AgentInterface, None] = None) -> Agent: