-class WorkflowAIError(Exception):
-    def __init__(self, message: str):
-        self.message = message
-        super().__init__(message)
+from typing import Any, Literal, Optional, Union
+
+from pydantic import BaseModel
+
+ProviderErrorCode = Literal[
+    # The maximum number of tokens was exceeded in the prompt
+    "max_tokens_exceeded",
+    # The model failed to generate a response
+    "failed_generation",
+    # The model generated a response but it was not valid
+    "invalid_generation",
+    # The model returned an error that we currently do not handle.
+    # The returned status code will match the provider status code, and the entire
+    # provider response will be included in the error details.
+    #
+    # This error is intended as a fallback since we do not control what the providers
+    # return. We track this error on our end, and it should eventually
+    # be assigned a different status code.
+    "unknown_provider_error",
+    # The provider returned a rate limit error
+    "rate_limit",
+    # The provider returned a server overloaded error
+    "server_overloaded",
+    # The provider config is invalid
+    "invalid_provider_config",
+    # The provider returned a 500
+    "provider_internal_error",
+    # The provider returned a 502 or 503
+    "provider_unavailable",
+    # The request timed out
+    "read_timeout",
+]
+
+ErrorCode = Union[
+    ProviderErrorCode,
+    Literal[
+        # The object was not found
+        "object_not_found",
+        # There are no configured providers supporting the requested model.
+        # This error will never happen when using WorkflowAI keys
+        "no_provider_supporting_model",
+        # The requested provider does not support the model
+        "provider_does_not_support_model",
+        # The requested model does not support the requested generation mode
+        # (e.g., a model that does not support image generation was sent an image)
+        "model_does_not_support_mode",
+        # Run properties are invalid, for example the model does not exist
+        "invalid_run_properties",
+        # An internal error occurred
+        "internal_error",
+        # The request was invalid
+        "bad_request",
+    ],
+    str,  # Fallback to avoid validation errors if a new error code is added to the API
+]
 
-    def __str__(self):
-        return self.message
 
+class BaseError(BaseModel):
+    details: Optional[dict[str, Any]] = None
+    message: str
+    status_code: Optional[int] = None
+    code: Optional[ErrorCode] = None
 
-class NotFoundError(WorkflowAIError):
-    def __init__(self, message: str):
-        super().__init__(message)
+
+class ErrorResponse(BaseModel):
+    error: BaseError
+    task_run_id: Optional[str] = None
+
+
+class WorkflowAIError(Exception):
+    def __init__(self, error: BaseError, task_run_id: Optional[str] = None):
+        self.error = error
+        self.task_run_id = task_run_id
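
For context on the `str` member of `ErrorCode`: it keeps deserialization forward-compatible, so a code added to the API before the SDK learns about it still validates as a plain string instead of raising a validation error. A minimal sketch, assuming Pydantic v2 and a hypothetical `errors` module for the import:

    from errors import ErrorResponse  # hypothetical module path

    # An error code the SDK has never seen still parses via the `str` fallback
    payload = '{"error": {"message": "boom", "code": "some_future_code"}, "task_run_id": "run_123"}'
    resp = ErrorResponse.model_validate_json(payload)
    assert resp.error.code == "some_future_code"
    assert resp.task_run_id == "run_123"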
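Likewise, a sketch of how a client might surface a failed HTTP response through the new exception. The function name, the use of `httpx`, and the fallback to a generic `BaseError` are assumptions for illustration, not part of this diff:

    import httpx  # assumed client; anything exposing .text and .status_code works

    from errors import BaseError, ErrorResponse, WorkflowAIError  # hypothetical module path


    def raise_workflowai_error(response: httpx.Response) -> None:
        # Try to parse the body as a structured ErrorResponse (Pydantic v2 API)
        try:
            parsed = ErrorResponse.model_validate_json(response.text)
        except ValueError:
            # Not a valid ErrorResponse: wrap the raw body in a generic BaseError
            parsed = ErrorResponse(
                error=BaseError(message=response.text, status_code=response.status_code),
            )
        raise WorkflowAIError(error=parsed.error, task_run_id=parsed.task_run_id)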