* trying to patch summarize when running with local llms * moved token magic numbers to constants, made special localllm exception class (TODO catch these for retry), fix summarize bug where it exits early if empty list * missing file * raise an exception on no-op summary * changed summarization logic to walk forwards in list until fraction of tokens in buffer is reached * added same diff to sync agent * reverted default max tokens to 8k, cleanup + more error wrapping for better error messages that get caught on retry * patch for web UI context limit error propagation, using best guess for what the web UI error message is * add webui token length exception * remove print * make no wrapper warning only pop up once * cleanup * Add errors to other wrappers --------- Co-authored-by: Vivian Fang <hi@vivi.sh>
29 lines
830 B
Python
29 lines
830 B
Python
class LLMError(Exception):
    """Root of the LLM exception hierarchy; catch this to handle any LLM failure."""
class LLMJSONParsingError(LLMError):
    """Raised when JSON emitted by an LLM cannot be parsed."""

    def __init__(self, message="Error parsing JSON generated by LLM"):
        # Expose the message as an attribute for callers that inspect it directly.
        self.message = message
        super().__init__(message)
class LocalLLMError(LLMError):
    """Catch-all error for problems encountered while running a local LLM."""

    def __init__(self, message="Encountered an error while running local LLM"):
        # Store before delegating so self.message is always set on the instance.
        self.message = message
        super().__init__(message)
class LocalLLMConnectionError(LLMError):
    """Raised when the local LLM endpoint (configured IP/port) is unreachable."""

    def __init__(self, message="Could not connect to local LLM"):
        # Mirror the sibling error classes: message kept as an attribute.
        self.message = message
        super().__init__(message)