Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -224,9 +224,10 @@ Here is the [official installation guide](https://docs.manim.community/en/stable

Fill in your **API credentials** in `api_config.json`.

* **LLM API**:
* **LLM API**:
* Required for Planner & Coder.
* Best Manim code quality achieved with **Claude-4-Opus**.
* Also supports [MiniMax](https://www.minimaxi.com/) via OpenAI-compatible API (`MiniMax-M2.7` with 1M context window).
* **VLM API**:
* Required for Planner Critic.
* For layout and aesthetics optimization, provide **Gemini API key**.
Expand Down
3 changes: 2 additions & 1 deletion src/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -815,6 +815,7 @@ def get_api_and_output(API_name):
"gpt-4o": (request_gpt4o_token, "Chatgpt4o"),
"gpt-o4mini": (request_o4mini_token, "Chatgpto4mini"),
"Gemini": (request_gemini_token, "Gemini"),
"minimax": (request_minimax_token, "MiniMax"),
}
try:
return mapping[API_name]
Expand All @@ -828,7 +829,7 @@ def build_and_parse_args():
parser.add_argument(
"--API",
type=str,
choices=["gpt-41", "claude", "gpt-5", "gpt-4o", "gpt-o4mini", "Gemini"],
choices=["gpt-41", "claude", "gpt-5", "gpt-4o", "gpt-o4mini", "Gemini", "minimax"],
default="gpt-41",
)
parser.add_argument(
Expand Down
5 changes: 5 additions & 0 deletions src/api_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,11 @@
"base_url": "...",
"api_key": "..."
},
"minimax": {
"base_url": "https://api.minimax.io/v1",
"api_key": "YOUR_MINIMAX_API_KEY",
"model": "MiniMax-M2.7"
},
"iconfinder": {
"api_key": "YOUR_ICONFINDER_KEY"
}
Expand Down
95 changes: 95 additions & 0 deletions src/gpt_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -1035,6 +1035,101 @@ def request_gpt41_img(prompt, image_path=None, log_id=None, max_tokens=1000, max
time.sleep(delay)


def request_minimax(prompt, log_id=None, max_tokens=8000, max_retries=3):
    """
    Request a completion from the MiniMax model via its OpenAI-compatible API.

    Retries failed requests with exponential backoff plus jitter.

    Args:
        prompt (str): The text prompt to send to the model.
        log_id (str, optional): Log ID for request tracking; generated via
            generate_log_id() when omitted.
        max_tokens (int, optional): Maximum tokens in the response. Default 8000.
        max_retries (int, optional): Maximum number of attempts. Default 3.

    Returns:
        str: The model's response content with surrounding whitespace stripped
        (empty string if the API returns no content). Returns None only when
        max_retries <= 0, i.e. no attempt was made.

    Raises:
        Exception: If all attempts fail; chained to the last underlying error.
    """
    base_url = cfg("minimax", "base_url")
    api_key = cfg("minimax", "api_key")
    model_name = cfg("minimax", "model")

    client = OpenAI(base_url=base_url, api_key=api_key)

    if log_id is None:
        log_id = generate_log_id()

    retry_count = 0
    while retry_count < max_retries:
        try:
            completion = client.chat.completions.create(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
            )
            # message.content can be None (e.g. refusal or tool-only replies);
            # guard it so .strip() cannot raise AttributeError.
            content = completion.choices[0].message.content
            return content.strip() if content else ""
        except Exception as e:
            retry_count += 1
            if retry_count >= max_retries:
                # Chain the original exception so the root cause stays in the traceback.
                raise Exception(
                    f"Failed after {max_retries} attempts. Last error: {str(e)}"
                ) from e

            # Exponential backoff with jitter: 0.2s, 0.4s, 0.8s, ... (+ up to 0.1s).
            delay = (2**retry_count) * 0.1 + (random.random() * 0.1)
            print(
                f"Request failed with error: {str(e)}. Retrying in {delay:.2f} seconds... (Attempt {retry_count}/{max_retries})"
            )
            time.sleep(delay)
    # Only reachable when max_retries <= 0: the loop body never ran.
    return None


def request_minimax_token(prompt, log_id=None, max_tokens=8000, max_retries=3):
    """
    Request a completion from the MiniMax model via its OpenAI-compatible API,
    with retry and token-usage tracking.

    Retries failed requests with exponential backoff plus jitter.

    Args:
        prompt (str): The text prompt to send to the model.
        log_id (str, optional): Log ID for request tracking; generated via
            generate_log_id() when omitted.
        max_tokens (int, optional): Maximum tokens in the response. Default 8000.
        max_retries (int, optional): Maximum number of attempts. Default 3.

    Returns:
        tuple: (completion, usage_info) where usage_info is a dict with keys
        "prompt_tokens", "completion_tokens", and "total_tokens". When
        max_retries <= 0 no attempt is made and (None, zeroed usage_info)
        is returned.

    Raises:
        Exception: If all attempts fail; chained to the last underlying error.
    """
    base_url = cfg("minimax", "base_url")
    api_key = cfg("minimax", "api_key")
    model_name = cfg("minimax", "model")

    client = OpenAI(base_url=base_url, api_key=api_key)

    if log_id is None:
        log_id = generate_log_id()

    usage_info = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

    retry_count = 0
    while retry_count < max_retries:
        try:
            completion = client.chat.completions.create(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
            )

            # usage may be absent on some OpenAI-compatible backends;
            # leave the zeroed defaults in that case.
            if completion.usage:
                usage_info["prompt_tokens"] = completion.usage.prompt_tokens
                usage_info["completion_tokens"] = completion.usage.completion_tokens
                usage_info["total_tokens"] = completion.usage.total_tokens
            return completion, usage_info

        except Exception as e:
            retry_count += 1
            if retry_count >= max_retries:
                # Chain the original exception so the root cause stays in the traceback.
                raise Exception(
                    f"Failed after {max_retries} attempts. Last error: {str(e)}"
                ) from e

            # Exponential backoff with jitter: 0.2s, 0.4s, 0.8s, ... (+ up to 0.1s).
            delay = (2**retry_count) * 0.1 + (random.random() * 0.1)
            print(
                f"Request failed with error: {str(e)}. Retrying in {delay:.2f} seconds... (Attempt {retry_count}/{max_retries})"
            )
            time.sleep(delay)
    # Only reachable when max_retries <= 0: the loop body never ran.
    return None, usage_info


if __name__ == "__main__":

# Gemini
Expand Down
Empty file added tests/__init__.py
Empty file.
Loading