fix: lmstudio tests and integration

Co-authored-by: Jin Peng <jinjpeng@Jins-MacBook-Pro.local>
This commit is contained in:
jnjpng
2025-08-07 20:02:56 -07:00
committed by GitHub
parent e35648b51f
commit 302b07bd2b
3 changed files with 4 additions and 8 deletions

View File

@@ -55,7 +55,7 @@ class LMStudioOpenAIProvider(OpenAIProvider):
LLMConfig(
model=model_name,
model_endpoint_type="openai",
model_endpoint=self.base_url,
model_endpoint=self.model_endpoint_url,
context_window=context_window_size,
handle=self.get_handle(model_name),
compatibility_type=compatibility_type,
@@ -94,7 +94,7 @@ class LMStudioOpenAIProvider(OpenAIProvider):
EmbeddingConfig(
embedding_model=model_name,
embedding_endpoint_type="openai",
embedding_endpoint=self.base_url,
embedding_endpoint=self.model_endpoint_url,
embedding_dim=768, # Default embedding dimension, not context window
embedding_chunk_size=DEFAULT_EMBEDDING_CHUNK_SIZE, # NOTE: max is 2048
handle=self.get_handle(model_name),

View File

@@ -2,7 +2,7 @@
"context_window": 8192,
"model": "qwen2.5-7b-instruct-1m",
"model_endpoint_type": "openai",
"model_endpoint": "http://127.0.0.1:1234/v1",
"model_endpoint": "http://127.0.0.1:1234/api/v0",
"model_wrapper": null,
"provider_name": "lmstudio_openai"
}

View File

@@ -110,11 +110,7 @@ USER_MESSAGE_BASE64_IMAGE: List[MessageCreate] = [
]
# configs for models that are too dumb to do much other than messaging
limited_configs = [
"ollama.json",
"together-qwen-2.5-72b-instruct.json",
"vllm.json",
]
limited_configs = ["ollama.json", "together-qwen-2.5-72b-instruct.json", "vllm.json", "lmstudio.json"]
all_configs = [
"openai-gpt-4o-mini.json",