@@ -19,6 +19,7 @@ env: | |||||
jobs: | jobs: | ||||
build-and-push-image: | build-and-push-image: | ||||
if: github.repository == 'zhayujie/chatgpt-on-wechat' | |||||
runs-on: ubuntu-latest | runs-on: ubuntu-latest | ||||
permissions: | permissions: | ||||
contents: read | contents: read | ||||
@@ -19,6 +19,7 @@ env: | |||||
jobs: | jobs: | ||||
build-and-push-image: | build-and-push-image: | ||||
if: github.repository == 'zhayujie/chatgpt-on-wechat' | |||||
runs-on: ubuntu-latest | runs-on: ubuntu-latest | ||||
permissions: | permissions: | ||||
contents: read | contents: read | ||||
@@ -5,7 +5,7 @@ | |||||
最新版本支持的功能如下: | 最新版本支持的功能如下: | ||||
- [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信、微信公众号、企业微信、飞书等部署方式 | - [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信、微信公众号、企业微信、飞书等部署方式 | ||||
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4, claude, 文心一言, 讯飞星火 | |||||
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4, claude, Gemini, 文心一言, 讯飞星火, 通义千问 | |||||
- [x] **语音能力:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai(whisper/tts) 等多种语音模型 | - [x] **语音能力:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai(whisper/tts) 等多种语音模型 | ||||
- [x] **图像能力:** 支持图片生成、图片识别、图生图(如照片修复),可选择 Dall-E-3, stable diffusion, replicate, midjourney, vision模型 | - [x] **图像能力:** 支持图片生成、图片识别、图生图(如照片修复),可选择 Dall-E-3, stable diffusion, replicate, midjourney, vision模型 | ||||
- [x] **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结、文档总结和对话等插件 | - [x] **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结、文档总结和对话等插件 | ||||
@@ -28,6 +28,8 @@ Demo made by [Visionn](https://www.wangpc.cc/) | |||||
# 更新日志 | # 更新日志 | ||||
>**2023.11.11:** [1.5.3版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.3) 和 [1.5.4版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.4),新增Google Gemini、通义千问模型 | |||||
>**2023.11.10:** [1.5.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.2),新增飞书通道、图像识别对话、黑名单配置 | >**2023.11.10:** [1.5.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.2),新增飞书通道、图像识别对话、黑名单配置 | ||||
>**2023.11.10:** [1.5.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.0),新增 `gpt-4-turbo`, `dall-e-3`, `tts` 模型接入,完善图像理解&生成、语音识别&生成的多模态能力 | >**2023.11.10:** [1.5.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.0),新增 `gpt-4-turbo`, `dall-e-3`, `tts` 模型接入,完善图像理解&生成、语音识别&生成的多模态能力 | ||||
@@ -44,7 +44,7 @@ def run(): | |||||
# os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:9001' | # os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:9001' | ||||
channel = channel_factory.create_channel(channel_name) | channel = channel_factory.create_channel(channel_name) | ||||
if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework", const.FEISHU]: | |||||
if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework", const.FEISHU,const.DINGTALK]: | |||||
PluginManager().load_plugins() | PluginManager().load_plugins() | ||||
if conf().get("use_linkai"): | if conf().get("use_linkai"): | ||||
@@ -10,31 +10,48 @@ import broadscope_bailian | |||||
from broadscope_bailian import ChatQaMessage | from broadscope_bailian import ChatQaMessage | ||||
from bot.bot import Bot | from bot.bot import Bot | ||||
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession | |||||
from bot.ali.ali_qwen_session import AliQwenSession | |||||
from bot.session_manager import SessionManager | from bot.session_manager import SessionManager | ||||
from bridge.context import ContextType | from bridge.context import ContextType | ||||
from bridge.reply import Reply, ReplyType | from bridge.reply import Reply, ReplyType | ||||
from common.log import logger | from common.log import logger | ||||
from common import const | |||||
from config import conf, load_config | from config import conf, load_config | ||||
class TongyiQwenBot(Bot): | |||||
class AliQwenBot(Bot): | |||||
def __init__(self): | def __init__(self): | ||||
super().__init__() | super().__init__() | ||||
self.access_key_id = conf().get("qwen_access_key_id") | |||||
self.access_key_secret = conf().get("qwen_access_key_secret") | |||||
self.agent_key = conf().get("qwen_agent_key") | |||||
self.app_id = conf().get("qwen_app_id") | |||||
self.node_id = conf().get("qwen_node_id") or "" | |||||
self.api_key_client = broadscope_bailian.AccessTokenClient(access_key_id=self.access_key_id, access_key_secret=self.access_key_secret) | |||||
self.api_key_expired_time = self.set_api_key() | self.api_key_expired_time = self.set_api_key() | ||||
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "qwen") | |||||
self.temperature = conf().get("temperature", 0.2) # 值在[0,1]之间,越大表示回复越具有不确定性 | |||||
self.top_p = conf().get("top_p", 1) | |||||
self.sessions = SessionManager(AliQwenSession, model=conf().get("model", const.QWEN)) | |||||
def api_key_client(self): | |||||
return broadscope_bailian.AccessTokenClient(access_key_id=self.access_key_id(), access_key_secret=self.access_key_secret()) | |||||
def access_key_id(self): | |||||
return conf().get("qwen_access_key_id") | |||||
def access_key_secret(self): | |||||
return conf().get("qwen_access_key_secret") | |||||
def agent_key(self): | |||||
return conf().get("qwen_agent_key") | |||||
def app_id(self): | |||||
return conf().get("qwen_app_id") | |||||
def node_id(self): | |||||
return conf().get("qwen_node_id", "") | |||||
def temperature(self): | |||||
return conf().get("temperature", 0.2 ) | |||||
def top_p(self): | |||||
return conf().get("top_p", 1) | |||||
def reply(self, query, context=None): | def reply(self, query, context=None): | ||||
# acquire reply content | # acquire reply content | ||||
if context.type == ContextType.TEXT: | if context.type == ContextType.TEXT: | ||||
logger.info("[TONGYI] query={}".format(query)) | |||||
logger.info("[QWEN] query={}".format(query)) | |||||
session_id = context["session_id"] | session_id = context["session_id"] | ||||
reply = None | reply = None | ||||
@@ -51,11 +68,11 @@ class TongyiQwenBot(Bot): | |||||
if reply: | if reply: | ||||
return reply | return reply | ||||
session = self.sessions.session_query(query, session_id) | session = self.sessions.session_query(query, session_id) | ||||
logger.debug("[TONGYI] session query={}".format(session.messages)) | |||||
logger.debug("[QWEN] session query={}".format(session.messages)) | |||||
reply_content = self.reply_text(session) | reply_content = self.reply_text(session) | ||||
logger.debug( | logger.debug( | ||||
"[TONGYI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format( | |||||
"[QWEN] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format( | |||||
session.messages, | session.messages, | ||||
session_id, | session_id, | ||||
reply_content["content"], | reply_content["content"], | ||||
@@ -69,14 +86,14 @@ class TongyiQwenBot(Bot): | |||||
reply = Reply(ReplyType.TEXT, reply_content["content"]) | reply = Reply(ReplyType.TEXT, reply_content["content"]) | ||||
else: | else: | ||||
reply = Reply(ReplyType.ERROR, reply_content["content"]) | reply = Reply(ReplyType.ERROR, reply_content["content"]) | ||||
logger.debug("[TONGYI] reply {} used 0 tokens.".format(reply_content)) | |||||
logger.debug("[QWEN] reply {} used 0 tokens.".format(reply_content)) | |||||
return reply | return reply | ||||
else: | else: | ||||
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type)) | reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type)) | ||||
return reply | return reply | ||||
def reply_text(self, session: BaiduWenxinSession, retry_count=0) -> dict: | |||||
def reply_text(self, session: AliQwenSession, retry_count=0) -> dict: | |||||
""" | """ | ||||
call bailian's ChatCompletion to get the answer | call bailian's ChatCompletion to get the answer | ||||
:param session: a conversation session | :param session: a conversation session | ||||
@@ -86,9 +103,9 @@ class TongyiQwenBot(Bot): | |||||
try: | try: | ||||
prompt, history = self.convert_messages_format(session.messages) | prompt, history = self.convert_messages_format(session.messages) | ||||
self.update_api_key_if_expired() | self.update_api_key_if_expired() | ||||
# NOTE 阿里百炼的call()函数参数比较奇怪, top_k参数表示top_p, top_p参数表示temperature, 可以参考文档 https://help.aliyun.com/document_detail/2587502.htm | |||||
response = broadscope_bailian.Completions().call(app_id=self.app_id, prompt=prompt, history=history, top_k=self.top_p, top_p=self.temperature) | |||||
completion_content = self.get_completion_content(response, self.node_id) | |||||
# NOTE 阿里百炼的call()函数未提供temperature参数,考虑到temperature和top_p参数作用相同,取两者较小的值作为top_p参数传入,详情见文档 https://help.aliyun.com/document_detail/2587502.htm | |||||
response = broadscope_bailian.Completions().call(app_id=self.app_id(), prompt=prompt, history=history, top_p=min(self.temperature(), self.top_p())) | |||||
completion_content = self.get_completion_content(response, self.node_id()) | |||||
completion_tokens, total_tokens = self.calc_tokens(session.messages, completion_content) | completion_tokens, total_tokens = self.calc_tokens(session.messages, completion_content) | ||||
return { | return { | ||||
"total_tokens": total_tokens, | "total_tokens": total_tokens, | ||||
@@ -99,39 +116,40 @@ class TongyiQwenBot(Bot): | |||||
need_retry = retry_count < 2 | need_retry = retry_count < 2 | ||||
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"} | result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"} | ||||
if isinstance(e, openai.error.RateLimitError): | if isinstance(e, openai.error.RateLimitError): | ||||
logger.warn("[TONGYI] RateLimitError: {}".format(e)) | |||||
logger.warn("[QWEN] RateLimitError: {}".format(e)) | |||||
result["content"] = "提问太快啦,请休息一下再问我吧" | result["content"] = "提问太快啦,请休息一下再问我吧" | ||||
if need_retry: | if need_retry: | ||||
time.sleep(20) | time.sleep(20) | ||||
elif isinstance(e, openai.error.Timeout): | elif isinstance(e, openai.error.Timeout): | ||||
logger.warn("[TONGYI] Timeout: {}".format(e)) | |||||
logger.warn("[QWEN] Timeout: {}".format(e)) | |||||
result["content"] = "我没有收到你的消息" | result["content"] = "我没有收到你的消息" | ||||
if need_retry: | if need_retry: | ||||
time.sleep(5) | time.sleep(5) | ||||
elif isinstance(e, openai.error.APIError): | elif isinstance(e, openai.error.APIError): | ||||
logger.warn("[TONGYI] Bad Gateway: {}".format(e)) | |||||
logger.warn("[QWEN] Bad Gateway: {}".format(e)) | |||||
result["content"] = "请再问我一次" | result["content"] = "请再问我一次" | ||||
if need_retry: | if need_retry: | ||||
time.sleep(10) | time.sleep(10) | ||||
elif isinstance(e, openai.error.APIConnectionError): | elif isinstance(e, openai.error.APIConnectionError): | ||||
logger.warn("[TONGYI] APIConnectionError: {}".format(e)) | |||||
logger.warn("[QWEN] APIConnectionError: {}".format(e)) | |||||
need_retry = False | need_retry = False | ||||
result["content"] = "我连接不到你的网络" | result["content"] = "我连接不到你的网络" | ||||
else: | else: | ||||
logger.exception("[TONGYI] Exception: {}".format(e)) | |||||
logger.exception("[QWEN] Exception: {}".format(e)) | |||||
need_retry = False | need_retry = False | ||||
self.sessions.clear_session(session.session_id) | self.sessions.clear_session(session.session_id) | ||||
if need_retry: | if need_retry: | ||||
logger.warn("[TONGYI] 第{}次重试".format(retry_count + 1)) | |||||
logger.warn("[QWEN] 第{}次重试".format(retry_count + 1)) | |||||
return self.reply_text(session, retry_count + 1) | return self.reply_text(session, retry_count + 1) | ||||
else: | else: | ||||
return result | return result | ||||
def set_api_key(self): | def set_api_key(self): | ||||
api_key, expired_time = self.api_key_client.create_token(agent_key=self.agent_key) | |||||
api_key, expired_time = self.api_key_client().create_token(agent_key=self.agent_key()) | |||||
broadscope_bailian.api_key = api_key | broadscope_bailian.api_key = api_key | ||||
return expired_time | return expired_time | ||||
def update_api_key_if_expired(self): | def update_api_key_if_expired(self): | ||||
if time.time() > self.api_key_expired_time: | if time.time() > self.api_key_expired_time: | ||||
self.api_key_expired_time = self.set_api_key() | self.api_key_expired_time = self.set_api_key() | ||||
@@ -140,6 +158,7 @@ class TongyiQwenBot(Bot): | |||||
history = [] | history = [] | ||||
user_content = '' | user_content = '' | ||||
assistant_content = '' | assistant_content = '' | ||||
system_content = '' | |||||
for message in messages: | for message in messages: | ||||
role = message.get('role') | role = message.get('role') | ||||
if role == 'user': | if role == 'user': | ||||
@@ -149,11 +168,21 @@ class TongyiQwenBot(Bot): | |||||
history.append(ChatQaMessage(user_content, assistant_content)) | history.append(ChatQaMessage(user_content, assistant_content)) | ||||
user_content = '' | user_content = '' | ||||
assistant_content = '' | assistant_content = '' | ||||
elif role =='system': | |||||
system_content += message.get('content') | |||||
if user_content == '': | if user_content == '': | ||||
raise Exception('no user message') | raise Exception('no user message') | ||||
if system_content != '': | |||||
# NOTE 模拟系统消息,测试发现人格描述以"你需要扮演ChatGPT"开头能够起作用,而以"你是ChatGPT"开头模型会直接否认 | |||||
system_qa = ChatQaMessage(system_content, '好的,我会严格按照你的设定回答问题') | |||||
history.insert(0, system_qa) | |||||
logger.debug("[QWEN] converted qa messages: {}".format([item.to_dict() for item in history])) | |||||
logger.debug("[QWEN] user content as prompt: {}".format(user_content)) | |||||
return user_content, history | return user_content, history | ||||
def get_completion_content(self, response, node_id): | def get_completion_content(self, response, node_id): | ||||
if not response['Success']: | |||||
return f"[ERROR]\n{response['Code']}:{response['Message']}" | |||||
text = response['Data']['Text'] | text = response['Data']['Text'] | ||||
if node_id == '': | if node_id == '': | ||||
return text | return text |
@@ -0,0 +1,62 @@ | |||||
from bot.session_manager import Session | |||||
from common.log import logger | |||||
""" | |||||
e.g. | |||||
[ | |||||
{"role": "system", "content": "You are a helpful assistant."}, | |||||
{"role": "user", "content": "Who won the world series in 2020?"}, | |||||
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."}, | |||||
{"role": "user", "content": "Where was it played?"} | |||||
] | |||||
""" | |||||
class AliQwenSession(Session):
    """Conversation session for the Ali Qwen (bailian) bot.

    Messages are kept in the OpenAI chat format, e.g.:
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
            {"role": "user", "content": "Where was it played?"}
        ]
    """

    def __init__(self, session_id, system_prompt=None, model="qianwen"):
        super().__init__(session_id, system_prompt)
        self.model = model  # model name; only used for token accounting
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop the oldest turns until the session fits within max_tokens.

        Returns the (estimated) token count after trimming. If precise
        counting fails, falls back to the caller-supplied cur_tokens with a
        rough adjustment instead of raising.
        """
        counted_precisely = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            counted_precisely = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Keep the system prompt at index 0; drop the oldest message.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                cur_tokens = self.calc_tokens() if counted_precisely else cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            # Re-estimate after each removal so the loop can terminate.
            cur_tokens = self.calc_tokens() if counted_precisely else cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        """Estimated token count of all messages currently in this session."""
        return num_tokens_from_messages(self.messages, self.model)
def num_tokens_from_messages(messages, model):
    """Return a rough token estimate for a list of chat messages.

    Official rule: for Chinese text one token is roughly one character; for
    English, roughly 3-4 letters (or one word) per token. See
    https://help.aliyun.com/document_detail/2586397.html
    We simply count characters here, which is accurate enough in practice.
    """
    return sum(len(message["content"]) for message in messages)
@@ -45,6 +45,11 @@ def create_bot(bot_type): | |||||
return ClaudeAIBot() | return ClaudeAIBot() | ||||
elif bot_type == const.QWEN: | elif bot_type == const.QWEN: | ||||
from bot.tongyi.tongyi_qwen_bot import TongyiQwenBot | |||||
return TongyiQwenBot() | |||||
from bot.ali.ali_qwen_bot import AliQwenBot | |||||
return AliQwenBot() | |||||
elif bot_type == const.GEMINI: | |||||
from bot.gemini.google_gemini_bot import GoogleGeminiBot | |||||
return GoogleGeminiBot() | |||||
raise RuntimeError | raise RuntimeError |
@@ -57,7 +57,7 @@ class ChatGPTSession(Session): | |||||
def num_tokens_from_messages(messages, model): | def num_tokens_from_messages(messages, model): | ||||
"""Returns the number of tokens used by a list of messages.""" | """Returns the number of tokens used by a list of messages.""" | ||||
if model in ["wenxin", "xunfei"]: | |||||
if model in ["wenxin", "xunfei", const.GEMINI]: | |||||
return num_tokens_by_character(messages) | return num_tokens_by_character(messages) | ||||
import tiktoken | import tiktoken | ||||
@@ -0,0 +1,75 @@ | |||||
""" | |||||
Google gemini bot | |||||
@author zhayujie | |||||
@Date 2023/12/15 | |||||
""" | |||||
# encoding:utf-8 | |||||
from bot.bot import Bot | |||||
import google.generativeai as genai | |||||
from bot.session_manager import SessionManager | |||||
from bridge.context import ContextType, Context | |||||
from bridge.reply import Reply, ReplyType | |||||
from common.log import logger | |||||
from config import conf | |||||
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession | |||||
# OpenAI对话模型API (可用) | |||||
class GoogleGeminiBot(Bot):
    """Chat bot backed by Google's Gemini model (gemini-pro)."""

    def __init__(self):
        super().__init__()
        self.api_key = conf().get("gemini_api_key")
        # Reuse Wenxin's character-based token counting for session trimming.
        self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "gpt-3.5-turbo")

    def reply(self, query, context: Context = None) -> Reply:
        """Generate a reply for a text query within the given context.

        :param query: the user's message text
        :param context: request context carrying session_id and message type
        :return: a Reply; TEXT on success, ERROR on failure
        """
        try:
            if context.type != ContextType.TEXT:
                logger.warn(f"[Gemini] Unsupported message type, type={context.type}")
                return Reply(ReplyType.TEXT, None)
            logger.info(f"[Gemini] query={query}")
            session_id = context["session_id"]
            session = self.sessions.session_query(query, session_id)
            gemini_messages = self._convert_to_gemini_messages(self._filter_messages(session.messages))
            genai.configure(api_key=self.api_key)
            model = genai.GenerativeModel('gemini-pro')
            response = model.generate_content(gemini_messages)
            reply_text = response.text
            self.sessions.session_reply(reply_text, session_id)
            logger.info(f"[Gemini] reply={reply_text}")
            return Reply(ReplyType.TEXT, reply_text)
        except Exception as e:
            logger.error("[Gemini] fetch reply error, may contain unsafe content")
            logger.error(e)
            # Bug fix: previously this branch fell through and returned None,
            # which crashes the channel layer; return an error reply instead.
            return Reply(ReplyType.ERROR, "对不起,我暂时无法回复这条消息")

    def _convert_to_gemini_messages(self, messages: list):
        """Map OpenAI-style messages to Gemini's role/parts format.

        Roles other than user/assistant (e.g. system) are dropped, since the
        Gemini chat API only accepts "user" and "model" roles.
        """
        res = []
        for msg in messages:
            if msg.get("role") == "user":
                role = "user"
            elif msg.get("role") == "assistant":
                role = "model"
            else:
                continue
            res.append({
                "role": role,
                "parts": [{"text": msg.get("content")}]
            })
        return res

    def _filter_messages(self, messages: list):
        """Keep a strictly alternating user/assistant suffix ending in user.

        Walks the history backwards, taking messages only when they match the
        expected alternating turn, because Gemini rejects histories with two
        consecutive messages of the same role.
        """
        res = []
        turn = "user"
        for i in range(len(messages) - 1, -1, -1):
            message = messages[i]
            if message.get("role") != turn:
                continue
            res.insert(0, message)
            if turn == "user":
                turn = "assistant"
            elif turn == "assistant":
                turn = "user"
        return res
@@ -1,10 +1,9 @@ | |||||
# access LinkAI knowledge base platform | # access LinkAI knowledge base platform | ||||
# docs: https://link-ai.tech/platform/link-app/wechat | # docs: https://link-ai.tech/platform/link-app/wechat | ||||
import re | |||||
import time | import time | ||||
import requests | import requests | ||||
import config | import config | ||||
from bot.bot import Bot | from bot.bot import Bot | ||||
from bot.chatgpt.chat_gpt_session import ChatGPTSession | from bot.chatgpt.chat_gpt_session import ChatGPTSession | ||||
@@ -141,6 +140,7 @@ class LinkAIBot(Bot): | |||||
thread.start() | thread.start() | ||||
if response["choices"][0].get("text_content"): | if response["choices"][0].get("text_content"): | ||||
reply_content = response["choices"][0].get("text_content") | reply_content = response["choices"][0].get("text_content") | ||||
reply_content = self._process_url(reply_content) | |||||
return Reply(ReplyType.TEXT, reply_content) | return Reply(ReplyType.TEXT, reply_content) | ||||
else: | else: | ||||
@@ -371,6 +371,14 @@ class LinkAIBot(Bot): | |||||
except Exception as e: | except Exception as e: | ||||
logger.exception(e) | logger.exception(e) | ||||
def _process_url(self, text): | |||||
try: | |||||
url_pattern = re.compile(r'\[(.*?)\]\((http[s]?://.*?)\)') | |||||
def replace_markdown_url(match): | |||||
return f"{match.group(2)}" | |||||
return url_pattern.sub(replace_markdown_url, text) | |||||
except Exception as e: | |||||
logger.error(e) | |||||
def _send_image(self, channel, context, image_urls): | def _send_image(self, channel, context, image_urls): | ||||
if not image_urls: | if not image_urls: | ||||
@@ -29,12 +29,16 @@ class Bridge(object): | |||||
self.btype["chat"] = const.XUNFEI | self.btype["chat"] = const.XUNFEI | ||||
if model_type in [const.QWEN]: | if model_type in [const.QWEN]: | ||||
self.btype["chat"] = const.QWEN | self.btype["chat"] = const.QWEN | ||||
if model_type in [const.GEMINI]: | |||||
self.btype["chat"] = const.GEMINI | |||||
if conf().get("use_linkai") and conf().get("linkai_api_key"): | if conf().get("use_linkai") and conf().get("linkai_api_key"): | ||||
self.btype["chat"] = const.LINKAI | self.btype["chat"] = const.LINKAI | ||||
if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]: | if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]: | ||||
self.btype["voice_to_text"] = const.LINKAI | self.btype["voice_to_text"] = const.LINKAI | ||||
if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]: | if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]: | ||||
self.btype["text_to_voice"] = const.LINKAI | self.btype["text_to_voice"] = const.LINKAI | ||||
if model_type in ["claude"]: | if model_type in ["claude"]: | ||||
self.btype["chat"] = const.CLAUDEAI | self.btype["chat"] = const.CLAUDEAI | ||||
self.bots = {} | self.bots = {} | ||||
@@ -36,6 +36,9 @@ def create_channel(channel_type) -> Channel: | |||||
elif channel_type == const.FEISHU: | elif channel_type == const.FEISHU: | ||||
from channel.feishu.feishu_channel import FeiShuChanel | from channel.feishu.feishu_channel import FeiShuChanel | ||||
ch = FeiShuChanel() | ch = FeiShuChanel() | ||||
elif channel_type == const.DINGTALK: | |||||
from channel.dingtalk.dingtalk_channel import DingTalkChanel | |||||
ch = DingTalkChanel() | |||||
else: | else: | ||||
raise RuntimeError | raise RuntimeError | ||||
ch.channel_type = channel_type | ch.channel_type = channel_type | ||||
@@ -0,0 +1,164 @@ | |||||
""" | |||||
钉钉通道接入 | |||||
@author huiwen | |||||
@Date 2023/11/28 | |||||
""" | |||||
# -*- coding=utf-8 -*- | |||||
import uuid | |||||
import requests | |||||
import web | |||||
from channel.dingtalk.dingtalk_message import DingTalkMessage | |||||
from bridge.context import Context | |||||
from bridge.reply import Reply, ReplyType | |||||
from common.log import logger | |||||
from common.singleton import singleton | |||||
from config import conf | |||||
from common.expired_dict import ExpiredDict | |||||
from bridge.context import ContextType | |||||
from channel.chat_channel import ChatChannel, check_prefix | |||||
from common import utils | |||||
import json | |||||
import os | |||||
import argparse | |||||
import logging | |||||
from dingtalk_stream import AckMessage | |||||
import dingtalk_stream | |||||
@singleton
class DingTalkChanel(ChatChannel, dingtalk_stream.ChatbotHandler):
    """DingTalk channel based on the dingtalk_stream SDK.

    Receives single-chat and group-chat callbacks over the stream connection
    and feeds them into the common ChatChannel pipeline.
    """

    # DingTalk app credentials, read once from config at class-definition time.
    dingtalk_client_id = conf().get('dingtalk_client_id')
    dingtalk_client_secret = conf().get('dingtalk_client_secret')

    def setup_logger(self):
        """Configure and return the root logger used by the stream SDK."""
        logger = logging.getLogger()
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(name)-8s %(levelname)-8s %(message)s [%(filename)s:%(lineno)d]'))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
        return logger

    def __init__(self):
        super().__init__()
        super(dingtalk_stream.ChatbotHandler, self).__init__()
        self.logger = self.setup_logger()
        # Cache of recently handled message ids for idempotency (~7.1h TTL).
        self.receivedMsgs = ExpiredDict(60 * 60 * 7.1)
        logger.info("[dingtalk] client_id={}, client_secret={} ".format(
            self.dingtalk_client_id, self.dingtalk_client_secret))
        # DingTalk needs no group whitelist or at-prefix checks.
        conf()["group_name_white_list"] = ["ALL_GROUP"]

    def startup(self):
        """Open the stream connection and block forever handling callbacks."""
        credential = dingtalk_stream.Credential(self.dingtalk_client_id, self.dingtalk_client_secret)
        client = dingtalk_stream.DingTalkStreamClient(credential)
        client.register_callback_handler(dingtalk_stream.chatbot.ChatbotMessage.TOPIC, self)
        client.start_forever()

    def handle_single(self, cmsg: DingTalkMessage):
        """Handle one single-chat message and enqueue it for processing."""
        if cmsg.ctype == ContextType.VOICE:
            logger.debug("[dingtalk]receive voice msg: {}".format(cmsg.content))
        elif cmsg.ctype == ContextType.IMAGE:
            logger.debug("[dingtalk]receive image msg: {}".format(cmsg.content))
        elif cmsg.ctype == ContextType.PATPAT:
            logger.debug("[dingtalk]receive patpat msg: {}".format(cmsg.content))
        elif cmsg.ctype == ContextType.TEXT:
            expression = cmsg.my_msg
            # Prepend the single-chat prefix so downstream prefix checks pass.
            cmsg.content = conf()["single_chat_prefix"][0] + cmsg.content
        context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=False, msg=cmsg)
        if context:
            self.produce(context)

    def handle_group(self, cmsg: DingTalkMessage):
        """Handle one group-chat message and enqueue it for processing."""
        if cmsg.ctype == ContextType.VOICE:
            logger.debug("[dingtalk]receive voice msg: {}".format(cmsg.content))
        elif cmsg.ctype == ContextType.IMAGE:
            logger.debug("[dingtalk]receive image msg: {}".format(cmsg.content))
        elif cmsg.ctype == ContextType.PATPAT:
            logger.debug("[dingtalk]receive patpat msg: {}".format(cmsg.content))
        elif cmsg.ctype == ContextType.TEXT:
            expression = cmsg.my_msg
            # Prepend the group-chat prefix so downstream prefix checks pass.
            cmsg.content = conf()["group_chat_prefix"][0] + cmsg.content
        context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=True, msg=cmsg)
        if context:
            # Bug fix: set the flag only after the None-check; previously
            # context['no_need_at'] was assigned before `if context:`, which
            # raises TypeError when _compose_context returns None.
            context['no_need_at'] = True
            self.produce(context)

    async def process(self, callback: dingtalk_stream.CallbackMessage):
        """Stream SDK entry point: dispatch one incoming chatbot callback."""
        try:
            incoming_message = dingtalk_stream.ChatbotMessage.from_dict(callback.data)
            dingtalk_msg = DingTalkMessage(incoming_message)
            if incoming_message.conversation_type == '1':
                self.handle_single(dingtalk_msg)
            else:
                self.handle_group(dingtalk_msg)
            return AckMessage.STATUS_OK, 'OK'
        except Exception as e:
            logger.error(e)
            # NOTE(review): FAILED_MSG is not defined in this class; presumably
            # inherited from the SDK handler base — confirm before relying on it.
            return self.FAILED_MSG

    def send(self, reply: Reply, context: Context):
        """Send a reply back to DingTalk via the SDK's text helper."""
        incoming_message = context.kwargs['msg'].incoming_message
        self.reply_text(reply.content, incoming_message)
@@ -0,0 +1,40 @@ | |||||
from bridge.context import ContextType | |||||
from channel.chat_message import ChatMessage | |||||
import json | |||||
import requests | |||||
from common.log import logger | |||||
from common.tmp_dir import TmpDir | |||||
from common import utils | |||||
from dingtalk_stream import ChatbotMessage | |||||
class DingTalkMessage(ChatMessage): | |||||
def __init__(self, event: ChatbotMessage): | |||||
super().__init__(event) | |||||
self.msg_id = event.message_id | |||||
msg_type = event.message_type | |||||
self.incoming_message =event | |||||
self.sender_staff_id = event.sender_staff_id | |||||
self.create_time = event.create_at | |||||
if event.conversation_type=="1": | |||||
self.is_group = False | |||||
else: | |||||
self.is_group = True | |||||
if msg_type == "text": | |||||
self.ctype = ContextType.TEXT | |||||
self.content = event.text.content.strip() | |||||
self.from_user_id = event.sender_id | |||||
self.to_user_id = event.chatbot_user_id | |||||
self.other_user_nickname = event.conversation_title | |||||
user_id = event.sender_id | |||||
nickname =event.sender_nick | |||||
@@ -46,35 +46,6 @@ class FeishuMessage(ChatMessage): | |||||
else: | else: | ||||
logger.info(f"[FeiShu] Failed to download file, key={file_key}, res={response.text}") | logger.info(f"[FeiShu] Failed to download file, key={file_key}, res={response.text}") | ||||
self._prepare_fn = _download_file | self._prepare_fn = _download_file | ||||
# elif msg.type == "voice": | |||||
# self.ctype = ContextType.VOICE | |||||
# self.content = TmpDir().path() + msg.media_id + "." + msg.format # content直接存临时目录路径 | |||||
# | |||||
# def download_voice(): | |||||
# # 如果响应状态码是200,则将响应内容写入本地文件 | |||||
# response = client.media.download(msg.media_id) | |||||
# if response.status_code == 200: | |||||
# with open(self.content, "wb") as f: | |||||
# f.write(response.content) | |||||
# else: | |||||
# logger.info(f"[wechatcom] Failed to download voice file, {response.content}") | |||||
# | |||||
# self._prepare_fn = download_voice | |||||
# elif msg.type == "image": | |||||
# self.ctype = ContextType.IMAGE | |||||
# self.content = TmpDir().path() + msg.media_id + ".png" # content直接存临时目录路径 | |||||
# | |||||
# def download_image(): | |||||
# # 如果响应状态码是200,则将响应内容写入本地文件 | |||||
# response = client.media.download(msg.media_id) | |||||
# if response.status_code == 200: | |||||
# with open(self.content, "wb") as f: | |||||
# f.write(response.content) | |||||
# else: | |||||
# logger.info(f"[wechatcom] Failed to download image file, {response.content}") | |||||
# | |||||
# self._prepare_fn = download_image | |||||
else: | else: | ||||
raise NotImplementedError("Unsupported message type: Type:{} ".format(msg_type)) | raise NotImplementedError("Unsupported message type: Type:{} ".format(msg_type)) | ||||
@@ -120,7 +120,7 @@ def _check(func): | |||||
@wework.msg_register( | @wework.msg_register( | ||||
[ntwork.MT_RECV_TEXT_MSG, ntwork.MT_RECV_IMAGE_MSG, 11072, ntwork.MT_RECV_VOICE_MSG]) | |||||
[ntwork.MT_RECV_TEXT_MSG, ntwork.MT_RECV_IMAGE_MSG, 11072, ntwork.MT_RECV_LINK_CARD_MSG,ntwork.MT_RECV_FILE_MSG, ntwork.MT_RECV_VOICE_MSG]) | |||||
def all_msg_handler(wework_instance: ntwork.WeWork, message): | def all_msg_handler(wework_instance: ntwork.WeWork, message): | ||||
logger.debug(f"收到消息: {message}") | logger.debug(f"收到消息: {message}") | ||||
if 'data' in message: | if 'data' in message: | ||||
@@ -128,6 +128,18 @@ class WeworkMessage(ChatMessage): | |||||
self.ctype = ContextType.IMAGE | self.ctype = ContextType.IMAGE | ||||
self.content = os.path.join(current_dir, "tmp", file_name) | self.content = os.path.join(current_dir, "tmp", file_name) | ||||
self._prepare_fn = lambda: cdn_download(wework, wework_msg, file_name) | self._prepare_fn = lambda: cdn_download(wework, wework_msg, file_name) | ||||
elif wework_msg["type"] == 11045: # 文件消息 | |||||
print("文件消息") | |||||
print(wework_msg) | |||||
file_name = datetime.datetime.now().strftime('%Y%m%d%H%M%S') | |||||
file_name = file_name + wework_msg['data']['cdn']['file_name'] | |||||
current_dir = os.getcwd() | |||||
self.ctype = ContextType.FILE | |||||
self.content = os.path.join(current_dir, "tmp", file_name) | |||||
self._prepare_fn = lambda: cdn_download(wework, wework_msg, file_name) | |||||
elif wework_msg["type"] == 11047: # 链接消息 | |||||
self.ctype = ContextType.SHARING | |||||
self.content = wework_msg['data']['url'] | |||||
elif wework_msg["type"] == 11072: # 新成员入群通知 | elif wework_msg["type"] == 11072: # 新成员入群通知 | ||||
self.ctype = ContextType.JOIN_GROUP | self.ctype = ContextType.JOIN_GROUP | ||||
member_list = wework_msg['data']['member_list'] | member_list = wework_msg['data']['member_list'] | ||||
@@ -179,6 +191,7 @@ class WeworkMessage(ChatMessage): | |||||
if conversation_id: | if conversation_id: | ||||
room_info = get_room_info(wework=wework, conversation_id=conversation_id) | room_info = get_room_info(wework=wework, conversation_id=conversation_id) | ||||
self.other_user_nickname = room_info.get('nickname', None) if room_info else None | self.other_user_nickname = room_info.get('nickname', None) if room_info else None | ||||
self.from_user_nickname = room_info.get('nickname', None) if room_info else None | |||||
at_list = data.get('at_list', []) | at_list = data.get('at_list', []) | ||||
tmp_list = [] | tmp_list = [] | ||||
for at in at_list: | for at in at_list: | ||||
@@ -7,6 +7,7 @@ CHATGPTONAZURE = "chatGPTOnAzure" | |||||
LINKAI = "linkai" | LINKAI = "linkai" | ||||
CLAUDEAI = "claude" | CLAUDEAI = "claude" | ||||
QWEN = "qwen" | QWEN = "qwen" | ||||
GEMINI = "gemini" | |||||
# model | # model | ||||
GPT35 = "gpt-3.5-turbo" | GPT35 = "gpt-3.5-turbo" | ||||
@@ -17,7 +18,8 @@ WHISPER_1 = "whisper-1" | |||||
TTS_1 = "tts-1" | TTS_1 = "tts-1" | ||||
TTS_1_HD = "tts-1-hd" | TTS_1_HD = "tts-1-hd" | ||||
MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "gpt-4-turbo", GPT4_TURBO_PREVIEW, QWEN] | |||||
MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "gpt-4-turbo", GPT4_TURBO_PREVIEW, QWEN, GEMINI] | |||||
# channel | # channel | ||||
FEISHU = "feishu" | FEISHU = "feishu" | ||||
DINGTALK = "dingtalk" |
@@ -73,6 +73,8 @@ available_setting = { | |||||
"qwen_agent_key": "", | "qwen_agent_key": "", | ||||
"qwen_app_id": "", | "qwen_app_id": "", | ||||
"qwen_node_id": "", # 流程编排模型用到的id,如果没有用到qwen_node_id,请务必保持为空字符串 | "qwen_node_id": "", # 流程编排模型用到的id,如果没有用到qwen_node_id,请务必保持为空字符串 | ||||
# Google Gemini Api Key | |||||
"gemini_api_key": "", | |||||
# wework的通用配置 | # wework的通用配置 | ||||
"wework_smart": True, # 配置wework是否使用已登录的企业微信,False为多开 | "wework_smart": True, # 配置wework是否使用已登录的企业微信,False为多开 | ||||
# 语音设置 | # 语音设置 | ||||
@@ -130,7 +132,11 @@ available_setting = { | |||||
"feishu_app_secret": "", # 飞书机器人APP secret | "feishu_app_secret": "", # 飞书机器人APP secret | ||||
"feishu_token": "", # 飞书 verification token | "feishu_token": "", # 飞书 verification token | ||||
"feishu_bot_name": "", # 飞书机器人的名字 | "feishu_bot_name": "", # 飞书机器人的名字 | ||||
# 钉钉配置 | |||||
"dingtalk_client_id": "", # 钉钉机器人Client ID | |||||
"dingtalk_client_secret": "", # 钉钉机器人Client Secret | |||||
# chatgpt指令自定义触发词 | # chatgpt指令自定义触发词 | ||||
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头 | "clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头 | ||||
# channel配置 | # channel配置 | ||||
@@ -313,7 +313,7 @@ class Godcmd(Plugin): | |||||
except Exception as e: | except Exception as e: | ||||
ok, result = False, "你没有设置私有GPT模型" | ok, result = False, "你没有设置私有GPT模型" | ||||
elif cmd == "reset": | elif cmd == "reset": | ||||
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI]: | |||||
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI]: | |||||
bot.sessions.clear_session(session_id) | bot.sessions.clear_session(session_id) | ||||
if Bridge().chat_bots.get(bottype): | if Bridge().chat_bots.get(bottype): | ||||
Bridge().chat_bots.get(bottype).sessions.clear_session(session_id) | Bridge().chat_bots.get(bottype).sessions.clear_session(session_id) | ||||
@@ -339,7 +339,7 @@ class Godcmd(Plugin): | |||||
ok, result = True, "配置已重载" | ok, result = True, "配置已重载" | ||||
elif cmd == "resetall": | elif cmd == "resetall": | ||||
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, | if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, | ||||
const.BAIDU, const.XUNFEI]: | |||||
const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI]: | |||||
channel.cancel_all_session() | channel.cancel_all_session() | ||||
bot.sessions.clear_all_session() | bot.sessions.clear_all_session() | ||||
ok, result = True, "重置所有会话成功" | ok, result = True, "重置所有会话成功" | ||||
@@ -33,3 +33,6 @@ curl_cffi | |||||
# tongyi qwen | # tongyi qwen | ||||
broadscope_bailian | broadscope_bailian | ||||
google-generativeai |