Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ There are other pipelines that can be used to extract information from multiple

For each of these graphs there is a multi version, which allows making calls to the LLM in parallel.

It is possible to use different LLM through APIs, such as **OpenAI**, **Groq**, **Azure** and **Gemini**, or local models using **Ollama**.
It is possible to use different LLMs through APIs, such as **OpenAI**, **Groq**, **Azure**, **Gemini**, **MiniMax**, and more, or local models using **Ollama**.

Remember to have [Ollama](https://ollama.com/) installed and download the models using the **ollama pull** command, if you want to use local models.

Expand Down
7 changes: 6 additions & 1 deletion scrapegraphai/graphs/abstract_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from pydantic import BaseModel

from ..helpers import models_tokens
from ..models import XAI, CLoD, DeepSeek, Nvidia, OneApi
from ..models import XAI, CLoD, DeepSeek, MiniMax, Nvidia, OneApi
from ..utils.logging import get_logger, set_verbosity_info, set_verbosity_warning

logger = get_logger(__name__)
Expand Down Expand Up @@ -171,6 +171,7 @@ def _create_llm(self, llm_config: dict) -> object:
"clod",
"togetherai",
"xai",
"minimax",
}

if "/" in llm_params["model"]:
Expand Down Expand Up @@ -226,6 +227,7 @@ def _create_llm(self, llm_config: dict) -> object:
"togetherai",
"clod",
"xai",
"minimax",
}:
if llm_params["model_provider"] == "bedrock":
llm_params["model_kwargs"] = {
Expand All @@ -243,6 +245,9 @@ def _create_llm(self, llm_config: dict) -> object:
if model_provider == "deepseek":
return DeepSeek(**llm_params)

if model_provider == "minimax":
return MiniMax(**llm_params)

if model_provider == "ernie":
from langchain_community.chat_models import ErnieBotChat

Expand Down
7 changes: 7 additions & 0 deletions scrapegraphai/helpers/models_tokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -377,4 +377,11 @@
"grok-3-mini": 1000000,
"grok-beta": 128000,
},
"minimax": {
"MiniMax-M1": 1000000,
"MiniMax-M1-40k": 40000,
"MiniMax-M2": 204000,
"MiniMax-M2.5": 204000,
"MiniMax-M2.5-highspeed": 204000,
},
}
3 changes: 2 additions & 1 deletion scrapegraphai/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,11 @@

from .clod import CLoD
from .deepseek import DeepSeek
from .minimax import MiniMax
from .nvidia import Nvidia
from .oneapi import OneApi
from .openai_itt import OpenAIImageToText
from .openai_tts import OpenAITextToSpeech
from .xai import XAI

__all__ = ["DeepSeek", "OneApi", "OpenAIImageToText", "OpenAITextToSpeech", "CLoD", "XAI", "Nvidia"]
__all__ = ["DeepSeek", "MiniMax", "OneApi", "OpenAIImageToText", "OpenAITextToSpeech", "CLoD", "XAI", "Nvidia"]
23 changes: 23 additions & 0 deletions scrapegraphai/models/minimax.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
"""
MiniMax Module
"""

from langchain_openai import ChatOpenAI


class MiniMax(ChatOpenAI):
    """
    A thin wrapper around ChatOpenAI for MiniMax, which exposes an
    OpenAI-compatible API. It supplies MiniMax-specific defaults and could be
    extended with additional methods if needed.

    Args:
        llm_config (dict): Configuration parameters for the language model.
            A generic ``api_key`` entry is mapped to ``openai_api_key``, and
            ``openai_api_base`` defaults to the MiniMax endpoint unless the
            caller supplies their own.
    """

    def __init__(self, **llm_config):
        # Map the provider-agnostic "api_key" option onto the parameter name
        # ChatOpenAI expects.
        if "api_key" in llm_config:
            llm_config["openai_api_key"] = llm_config.pop("api_key")

        # Default to MiniMax's OpenAI-compatible endpoint, but do not clobber
        # a base URL the caller explicitly provided (e.g. a regional endpoint
        # or a proxy). The previous unconditional assignment silently
        # overwrote any caller-supplied value.
        llm_config.setdefault("openai_api_base", "https://api.minimax.io/v1")

        super().__init__(**llm_config)