diff --git a/README.md b/README.md index 78fb5f00..92be5c1b 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ There are other pipelines that can be used to extract information from multiple For each of these graphs there is the multi version. It allows to make calls of the LLM in parallel. -It is possible to use different LLM through APIs, such as **OpenAI**, **Groq**, **Azure** and **Gemini**, or local models using **Ollama**. +It is possible to use different LLMs through APIs, such as **OpenAI**, **Groq**, **Azure**, **Gemini**, **MiniMax** and more, or local models using **Ollama**. Remember to have [Ollama](https://ollama.com/) installed and download the models using the **ollama pull** command, if you want to use local models. diff --git a/scrapegraphai/graphs/abstract_graph.py b/scrapegraphai/graphs/abstract_graph.py index 0efcd281..065d3d30 100644 --- a/scrapegraphai/graphs/abstract_graph.py +++ b/scrapegraphai/graphs/abstract_graph.py @@ -13,7 +13,7 @@ from pydantic import BaseModel from ..helpers import models_tokens -from ..models import XAI, CLoD, DeepSeek, Nvidia, OneApi +from ..models import XAI, CLoD, DeepSeek, MiniMax, Nvidia, OneApi from ..utils.logging import get_logger, set_verbosity_info, set_verbosity_warning logger = get_logger(__name__) @@ -171,6 +171,7 @@ def _create_llm(self, llm_config: dict) -> object: "clod", "togetherai", "xai", + "minimax", } if "/" in llm_params["model"]: @@ -226,6 +227,7 @@ def _create_llm(self, llm_config: dict) -> object: "togetherai", "clod", "xai", + "minimax", }: if llm_params["model_provider"] == "bedrock": llm_params["model_kwargs"] = { @@ -243,6 +245,9 @@ def _create_llm(self, llm_config: dict) -> object: if model_provider == "deepseek": return DeepSeek(**llm_params) + if model_provider == "minimax": + return MiniMax(**llm_params) + if model_provider == "ernie": from langchain_community.chat_models import ErnieBotChat diff --git a/scrapegraphai/helpers/models_tokens.py
b/scrapegraphai/helpers/models_tokens.py index 406d48f0..5d0bcf21 100644 --- a/scrapegraphai/helpers/models_tokens.py +++ b/scrapegraphai/helpers/models_tokens.py @@ -377,4 +377,11 @@ "grok-3-mini": 1000000, "grok-beta": 128000, }, + "minimax": { + "MiniMax-M1": 1000000, + "MiniMax-M1-40k": 40000, + "MiniMax-M2": 204000, + "MiniMax-M2.5": 204000, + "MiniMax-M2.5-highspeed": 204000, + }, } diff --git a/scrapegraphai/models/__init__.py b/scrapegraphai/models/__init__.py index 8ec6e490..d2157d37 100644 --- a/scrapegraphai/models/__init__.py +++ b/scrapegraphai/models/__init__.py @@ -4,10 +4,11 @@ from .clod import CLoD from .deepseek import DeepSeek +from .minimax import MiniMax from .nvidia import Nvidia from .oneapi import OneApi from .openai_itt import OpenAIImageToText from .openai_tts import OpenAITextToSpeech from .xai import XAI -__all__ = ["DeepSeek", "OneApi", "OpenAIImageToText", "OpenAITextToSpeech", "CLoD", "XAI", "Nvidia"] +__all__ = ["DeepSeek", "MiniMax", "OneApi", "OpenAIImageToText", "OpenAITextToSpeech", "CLoD", "XAI", "Nvidia"] diff --git a/scrapegraphai/models/minimax.py b/scrapegraphai/models/minimax.py new file mode 100644 index 00000000..ae08d0c6 --- /dev/null +++ b/scrapegraphai/models/minimax.py @@ -0,0 +1,23 @@ +""" +MiniMax Module +""" + +from langchain_openai import ChatOpenAI + + +class MiniMax(ChatOpenAI): + """ + A wrapper for the ChatOpenAI class (MiniMax uses an OpenAI-compatible API) that + provides default configuration and could be extended with additional methods + if needed. + + Args: + llm_config (dict): Configuration parameters for the language model. + """ + + def __init__(self, **llm_config): + if "api_key" in llm_config: + llm_config["openai_api_key"] = llm_config.pop("api_key") + llm_config["openai_api_base"] = "https://api.minimax.io/v1" + + super().__init__(**llm_config)