diff --git a/astrbot/core/provider/sources/anthropic_source.py b/astrbot/core/provider/sources/anthropic_source.py
index b6d6ebeaf0..203d0610ff 100644
--- a/astrbot/core/provider/sources/anthropic_source.py
+++ b/astrbot/core/provider/sources/anthropic_source.py
@@ -515,7 +515,7 @@ async def text_chat(
 
         model = model or self.get_model()
 
-        payloads = {**kwargs, "messages": new_messages, "model": model}
+        payloads = {"messages": new_messages, "model": model}
 
         # Anthropic has a different way of handling system prompts
         if system_prompt:
@@ -571,7 +571,7 @@ async def text_chat_stream(
 
         model = model or self.get_model()
 
-        payloads = {**kwargs, "messages": new_messages, "model": model}
+        payloads = {"messages": new_messages, "model": model}
 
         # Anthropic has a different way of handling system prompts
         if system_prompt:
diff --git a/astrbot/core/provider/sources/gemini_source.py b/astrbot/core/provider/sources/gemini_source.py
index c66a9a7e98..9557f3dbcd 100644
--- a/astrbot/core/provider/sources/gemini_source.py
+++ b/astrbot/core/provider/sources/gemini_source.py
@@ -757,7 +757,7 @@ async def text_chat(
 
         model = model or self.get_model()
 
-        payloads = {**kwargs, "messages": context_query, "model": model}
+        payloads = {"messages": context_query, "model": model}
 
         retry = 10
         keys = self.api_keys.copy()
@@ -812,7 +812,7 @@ async def text_chat_stream(
 
         model = model or self.get_model()
 
-        payloads = {**kwargs, "messages": context_query, "model": model}
+        payloads = {"messages": context_query, "model": model}
 
         retry = 10
         keys = self.api_keys.copy()
diff --git a/astrbot/core/provider/sources/openai_source.py b/astrbot/core/provider/sources/openai_source.py
index 7e09d16159..cdad66a22f 100644
--- a/astrbot/core/provider/sources/openai_source.py
+++ b/astrbot/core/provider/sources/openai_source.py
@@ -812,7 +812,8 @@ async def _prepare_chat_payload(
         context_query = await self._materialize_context_image_parts(context_query)
 
         model = model or self.get_model()
-        payloads = {**kwargs, "messages": context_query, "model": model}
+
+        payloads = {"messages": context_query, "model": model}
 
         self._finally_convert_payload(payloads)