@@ -1,11 +1,11 @@
# Introduction
> This project is an LLM-based intelligent chat bot. It can be connected to WeChat, WeCom, WeChat Official Accounts, Feishu and DingTalk, with a choice of GPT3.5/GPT4.0/Claude/文心一言/讯飞星火/通义千问/Gemini/LinkAI/ZhipuAI. It handles text, voice and images, can reach external resources such as the operating system and the internet through plugins, and supports building enterprise AI applications on top of your own knowledge base.
> This project is an LLM-based intelligent chat bot. It can be connected to WeCom, WeChat Official Accounts, Feishu and DingTalk, with a choice of GPT3.5/GPT4.0/Claude/文心一言/讯飞星火/通义千问/Gemini/LinkAI/ZhipuAI. It handles text, voice and images, can reach external resources such as the operating system and the internet through plugins, and supports building enterprise AI applications on top of your own knowledge base.
The latest version supports the following features:
- [x] **Multi-channel deployment:** several fully featured deployment options, currently including personal WeChat, WeChat Official Accounts, WeCom, Feishu and DingTalk
- [x] **Basic chat:** smart replies to private and group messages, with multi-turn context memory, supporting GPT-3.5, GPT-4, Claude, Gemini, 文心一言, 讯飞星火, 通义千问, ChatGLM
- [x] **Multi-channel deployment:** several fully featured deployment options, currently including WeChat Official Accounts, WeCom applications, Feishu and DingTalk
- [x] **Basic chat:** smart replies to private and group messages, with multi-turn context memory, supporting GPT-3.5, GPT-4, Claude-3, Gemini, 文心一言, 讯飞星火, 通义千问, ChatGLM-4
- [x] **Voice capabilities:** recognizes voice messages and can answer in text or voice, supporting azure, baidu, google and openai (whisper/tts) speech engines
- [x] **Image capabilities:** image generation, image recognition and image-to-image (e.g. photo restoration), with a choice of Dall-E-3, stable diffusion, replicate, midjourney, CogView-3 and vision models
- [x] **Rich plugins:** personalized plugin extensions; existing plugins include multi-persona switching, text adventure, sensitive-word filtering, chat-log summarization, document summarization and Q&A, and web search
@@ -35,7 +35,11 @@ SaaS services, private deployment, stable hosted access, and other modes.
# Changelog
>**2023.11.11:** [v1.5.3](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.3) and [v1.5.4](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.4), added the Google Gemini and 通义千问 models
>**2024.03.26:** [v1.5.8](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.8) and [v1.5.7](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.7), added the GLM-4 and Claude-3 models and edge-tts voice support
>**2024.01.26:** [v1.5.6](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.6) and [v1.5.5](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.5), DingTalk channel, tool plugin upgrade, gpt-4-turbo model update
>**2023.11.11:** [v1.5.3](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.3) and [v1.5.4](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.4), added the 通义千问 model and Google Gemini
>**2023.11.10:** [v1.5.2](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.2), added the Feishu channel, image-recognition chat, and blacklist configuration
@@ -47,19 +51,9 @@ SaaS services, private deployment, stable hosted access, and other modes.
>**2023.08.08:** Connected the Baidu 文心一言 model; Midjourney drawing is supported through the [plugin](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/linkai)
>**2023.06.12:** Connected the [LinkAI](https://link-ai.tech/console) platform: create a domain knowledge base online and plug it into WeChat, Official Accounts and WeCom to build a dedicated customer-service bot. See the [integration docs](https://link-ai.tech/platform/link-app/wechat).
>**2023.04.26:** WeCom application deployment is supported, compatible with plugins and with voice/image interaction, an ideal choice for a personal assistant, [docs](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md). (contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944))
>**2023.04.05:** WeChat Official Account deployment is supported, compatible with plugins and with voice/image interaction, [docs](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatmp/README.md). (contributed by [@JS00000](https://github.com/JS00000) in [#686](https://github.com/zhayujie/chatgpt-on-wechat/pull/686))
>**2023.04.05:** Added the `tool` plugin that lets ChatGPT use tools, [docs](https://github.com/goldfishh/chatgpt-on-wechat/blob/master/plugins/tool/README.md). Tool-related issues can be reported to [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub). (contributed by [@goldfishh](https://github.com/goldfishh) in [#663](https://github.com/zhayujie/chatgpt-on-wechat/pull/663))
>**2023.03.25:** Plugin-based development is supported; plugins such as multi-persona switching, a text adventure game, admin commands and Stable Diffusion are available, see [#578](https://github.com/zhayujie/chatgpt-on-wechat/issues/578). (contributed by [@lanvent](https://github.com/lanvent) in [#565](https://github.com/zhayujie/chatgpt-on-wechat/pull/565))
>**2023.03.09:** WeChat voice messages are parsed and answered based on the `whisper API` (more voice `API` services were connected later); enable it with the config item `"speech_recognition":true`, see [#415](https://github.com/zhayujie/chatgpt-on-wechat/issues/415). (contributed by [wanggang1987](https://github.com/wanggang1987) in [#385](https://github.com/zhayujie/chatgpt-on-wechat/pull/385))
>**2023.06.12:** Connected the [LinkAI](https://link-ai.tech/console) platform: create a domain knowledge base online and build a dedicated customer-service bot. See the [integration docs](https://link-ai.tech/platform/link-app/wechat).
>**2023.02.09:** QR-code login carries a risk of account restriction, use with caution, see [#158](https://github.com/AutumnWhj/ChatGPT-wechat-bot/issues/158)
See earlier entries in the [archived changelog](/docs/version/old-version.md)
# Quick Start
@@ -118,7 +112,8 @@ pip3 install -r requirements-optional.txt
# Example config.json contents
{
  "open_ai_api_key": "YOUR API KEY",  # the OpenAI API key created above
  "model": "gpt-3.5-turbo",  # model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
  "model": "gpt-3.5-turbo",  # model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei, claude-3-opus-20240229
  "claude_api_key": "YOUR API KEY",  # set this key if a claude-3 model is selected; open_ai_api_key is still required for image generation, voice and other features
  "proxy": "",  # ip and port of the proxy client; required when a proxy is needed in mainland China, e.g. "127.0.0.1:7890"
  "single_chat_prefix": ["bot", "@bot"],  # in private chats, the text must contain this prefix to trigger a reply
  "single_chat_reply_prefix": "[bot] ",  # prefix prepended to replies in private chats, to distinguish the bot from a human
@@ -192,7 +187,7 @@ pip3 install -r requirements-optional.txt
python3 app.py  # on Windows this command is usually: python app.py
```
After the QR code is printed in the terminal, scan it with WeChat. Once "Start auto replying" appears, the auto-reply program is running (note: the WeChat account used for login must have completed real-name verification in WeChat Pay). After scanning, your account becomes the bot and you can trigger auto replies on the WeChat mobile app with the configured keywords (any friend sending you a message, or you sending a message to a friend), see [#142](https://github.com/zhayujie/chatgpt-on-wechat/issues/142).
After the QR code is printed in the terminal, scan it with WeChat. Once "Start auto replying" appears, the auto-reply program is running (note: the WeChat account used for login must have completed real-name verification in WeChat Pay). After scanning, your account becomes the bot and you can trigger auto replies on the mobile app with the configured keywords (any friend sending you a message, or you sending a message to a friend), see [#142](https://github.com/zhayujie/chatgpt-on-wechat/issues/142).

### 2. Server deployment
@@ -2,6 +2,7 @@
channel factory
"""
from common import const
from common.log import logger


def create_bot(bot_type):
@@ -43,7 +44,9 @@ def create_bot(bot_type):
    elif bot_type == const.CLAUDEAI:
        from bot.claude.claude_ai_bot import ClaudeAIBot
        return ClaudeAIBot()
    elif bot_type == const.CLAUDEAPI:
        from bot.claudeapi.claude_api_bot import ClaudeAPIBot
        return ClaudeAPIBot()
    elif bot_type == const.QWEN:
        from bot.ali.ali_qwen_bot import AliQwenBot
        return AliQwenBot()
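
For context, a minimal sketch of how the new factory branch could be exercised directly. The module paths and the `load_config()` helper follow the upstream repo's layout and are assumptions, not part of this diff; `const.CLAUDEAPI` is the constant added to `common/const.py` further down:

```python
# Hypothetical smoke test (assumed paths): resolve the new Claude API bot via the factory.
from config import load_config
from common import const
from bot.bot_factory import create_bot

load_config()                        # ClaudeAPIBot reads claude_api_key from conf() in __init__
bot = create_bot(const.CLAUDEAPI)
print(type(bot).__name__)            # expected: ClaudeAPIBot
```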
@@ -62,13 +62,14 @@ def num_tokens_from_messages(messages, model):
    import tiktoken

    if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo", "gpt-3.5-turbo-1106"]:
    if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo", "gpt-3.5-turbo-1106", "moonshot"]:
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
                   "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
                   "gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW]:
        return num_tokens_from_messages(messages, model="gpt-4")
    elif model.startswith("claude-3"):
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
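
The change above routes claude-3 (and moonshot) through the gpt-3.5-turbo token counter, since tiktoken ships no Claude vocabulary; the count is therefore an approximation used only for session-length bookkeeping. A small illustrative sketch of that fallback, independent of the project code:

```python
# Illustrative only: approximate token counting for a claude-3 model by reusing
# the gpt-3.5-turbo encoding, mirroring the fallback added above.
import tiktoken

def approx_tokens(text: str, model: str = "claude-3-opus-20240229") -> int:
    if model.startswith("claude-3"):
        model = "gpt-3.5-turbo"      # no Claude tokenizer in tiktoken; reuse an OpenAI one
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    return len(enc.encode(text))

print(approx_tokens("帮我总结一下这段聊天记录"))
```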
@@ -0,0 +1,133 @@
# encoding:utf-8
import time

import openai
import openai.error
import anthropic

from bot.bot import Bot
from bot.openai.open_ai_image import OpenAIImage
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.gemini.google_gemini_bot import GoogleGeminiBot
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf

user_session = dict()


# Claude official-API bot: chat goes through the anthropic SDK, while image
# generation is still delegated to OpenAIImage (so open_ai_api_key remains required).
class ClaudeAPIBot(Bot, OpenAIImage):
    def __init__(self):
        super().__init__()
        self.claudeClient = anthropic.Anthropic(
            api_key=conf().get("claude_api_key")
        )
        openai.api_key = conf().get("open_ai_api_key")
        if conf().get("open_ai_api_base"):
            openai.api_base = conf().get("open_ai_api_base")
        proxy = conf().get("proxy")
        if proxy:
            openai.proxy = proxy
        self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "text-davinci-003")

    def reply(self, query, context=None):
        # acquire reply content
        if context and context.type:
            if context.type == ContextType.TEXT:
                logger.info("[CLAUDE_API] query={}".format(query))
                session_id = context["session_id"]
                reply = None
                if query == "#清除记忆":
                    self.sessions.clear_session(session_id)
                    reply = Reply(ReplyType.INFO, "记忆已清除")
                elif query == "#清除所有":
                    self.sessions.clear_all_session()
                    reply = Reply(ReplyType.INFO, "所有人记忆已清除")
                else:
                    session = self.sessions.session_query(query, session_id)
                    result = self.reply_text(session)
                    logger.info(result)
                    total_tokens, completion_tokens, reply_content = (
                        result["total_tokens"],
                        result["completion_tokens"],
                        result["content"],
                    )
                    logger.debug(
                        "[CLAUDE_API] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens)
                    )
                    if total_tokens == 0:
                        reply = Reply(ReplyType.ERROR, reply_content)
                    else:
                        self.sessions.session_reply(reply_content, session_id, total_tokens)
                        reply = Reply(ReplyType.TEXT, reply_content)
                return reply
            elif context.type == ContextType.IMAGE_CREATE:
                ok, retstring = self.create_img(query, 0)
                reply = None
                if ok:
                    reply = Reply(ReplyType.IMAGE_URL, retstring)
                else:
                    reply = Reply(ReplyType.ERROR, retstring)
                return reply

    def reply_text(self, session: ChatGPTSession, retry_count=0):
        try:
            actual_model = self._model_mapping(conf().get("model"))
            response = self.claudeClient.messages.create(
                model=actual_model,
                max_tokens=1024,
                # system=conf().get("system"),
                messages=GoogleGeminiBot.filter_messages(session.messages)
            )
            # response = openai.Completion.create(prompt=str(session), **self.args)
            res_content = response.content[0].text.strip().replace("<|endoftext|>", "")
            total_tokens = response.usage.input_tokens + response.usage.output_tokens
            completion_tokens = response.usage.output_tokens
            logger.info("[CLAUDE_API] reply={}".format(res_content))
            return {
                "total_tokens": total_tokens,
                "completion_tokens": completion_tokens,
                "content": res_content,
            }
        except Exception as e:
            need_retry = retry_count < 2
            # total_tokens stays 0 on the error path so reply() can tell failure from success
            result = {"total_tokens": 0, "completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            # the openai.error branches are kept from the ChatGPT bot; anthropic exceptions
            # fall through to the generic else branch below
            if isinstance(e, openai.error.RateLimitError):
                logger.warn("[CLAUDE_API] RateLimitError: {}".format(e))
                result["content"] = "提问太快啦,请休息一下再问我吧"
                if need_retry:
                    time.sleep(20)
            elif isinstance(e, openai.error.Timeout):
                logger.warn("[CLAUDE_API] Timeout: {}".format(e))
                result["content"] = "我没有收到你的消息"
                if need_retry:
                    time.sleep(5)
            elif isinstance(e, openai.error.APIConnectionError):
                logger.warn("[CLAUDE_API] APIConnectionError: {}".format(e))
                need_retry = False
                result["content"] = "我连接不到你的网络"
            else:
                logger.warn("[CLAUDE_API] Exception: {}".format(e))
                need_retry = False
                self.sessions.clear_session(session.session_id)

            if need_retry:
                logger.warn("[CLAUDE_API] 第{}次重试".format(retry_count + 1))
                return self.reply_text(session, retry_count + 1)
            else:
                return result

    def _model_mapping(self, model) -> str:
        # map the short aliases accepted in config.json to dated Anthropic model names
        if model == "claude-3-opus":
            return "claude-3-opus-20240229"
        elif model == "claude-3-sonnet":
            return "claude-3-sonnet-20240229"
        elif model == "claude-3-haiku":
            return "claude-3-haiku-20240307"
        return model
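
A hedged, standalone sketch of how this new bot might be exercised outside the channel pipeline. It assumes config.json carries a claude-3 model name plus `claude_api_key`, that `load_config()` exists in `config.py`, and that `Context` lives next to `ContextType` in `bridge.context` and supports item assignment for `session_id`; none of that is shown in the diff itself:

```python
# Hypothetical manual test for ClaudeAPIBot (not part of the diff).
from config import load_config
from bridge.context import Context, ContextType
from bot.claudeapi.claude_api_bot import ClaudeAPIBot

load_config()                                  # expects "model": "claude-3-opus" and "claude_api_key" in config.json
bot = ClaudeAPIBot()

ctx = Context(ContextType.TEXT, "用一句话介绍你自己")
ctx["session_id"] = "manual-test"              # SessionManager keys the conversation on this id
reply = bot.reply("用一句话介绍你自己", context=ctx)
print(reply.type, reply.content)
```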
@@ -33,7 +33,7 @@ class GoogleGeminiBot(Bot):
            logger.info(f"[Gemini] query={query}")
            session_id = context["session_id"]
            session = self.sessions.session_query(query, session_id)
            gemini_messages = self._convert_to_gemini_messages(self._filter_messages(session.messages))
            gemini_messages = self._convert_to_gemini_messages(self.filter_messages(session.messages))
            genai.configure(api_key=self.api_key)
            model = genai.GenerativeModel('gemini-pro')
            response = model.generate_content(gemini_messages)
@@ -44,6 +44,7 @@ class GoogleGeminiBot(Bot):
        except Exception as e:
            logger.error("[Gemini] fetch reply error, may contain unsafe content")
            logger.error(e)
            return Reply(ReplyType.ERROR, "invoke [Gemini] api failed!")

    def _convert_to_gemini_messages(self, messages: list):
        res = []
@@ -60,9 +61,12 @@ class GoogleGeminiBot(Bot):
            })
        return res

    def _filter_messages(self, messages: list):
    @staticmethod
    def filter_messages(messages: list):
        res = []
        turn = "user"
        if not messages:
            return res
        for i in range(len(messages) - 1, -1, -1):
            message = messages[i]
            if message.get("role") != turn:
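
Turning `filter_messages` into a `@staticmethod` is what lets `ClaudeAPIBot.reply_text` reuse it above without constructing a Gemini bot. A small hypothetical call; based on the loop shown, the helper keeps only messages that fit an alternating user/assistant pattern counted from the newest message, which both Gemini and the Anthropic messages API require:

```python
# Hypothetical reuse of the now-static helper (the session history is made up).
from bot.gemini.google_gemini_bot import GoogleGeminiBot

history = [
    {"role": "user", "content": "你好"},
    {"role": "assistant", "content": "你好,有什么可以帮你?"},
    {"role": "assistant", "content": "(重复的一条回复)"},
    {"role": "user", "content": "介绍一下Claude-3"},
]
cleaned = GoogleGeminiBot.filter_messages(history)
# one of the two consecutive assistant turns is dropped so the remaining roles alternate
print(cleaned)
```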
@@ -92,6 +92,7 @@ class LinkAIBot(Bot):
            "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2, 2]; larger values favor more varied output
            "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2, 2]; larger values favor more varied output
            "session_id": session_id,
            "sender_id": session_id,
            "channel_type": conf().get("channel_type", "wx")
        }
        try:
@@ -129,9 +130,12 @@ class LinkAIBot(Bot):
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                self.sessions.session_reply(reply_content, session_id, total_tokens, query=query)
                res_code = response.get('code')
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}, res_code={res_code}")
                if res_code == 429:
                    logger.warn(f"[LINKAI] 用户访问超出限流配置,sender_id={body.get('sender_id')}")
                else:
                    self.sessions.session_reply(reply_content, session_id, total_tokens, query=query)
                agent_suffix = self._fetch_agent_suffix(response)
                if agent_suffix:
                    reply_content += agent_suffix
@@ -160,7 +164,10 @@ class LinkAIBot(Bot):
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self._chat(query, context, retry_count + 1)

                return Reply(ReplyType.TEXT, "提问太快啦,请休息一下再问我吧")
                error_reply = "提问太快啦,请休息一下再问我吧"
                if res.status_code == 409:
                    error_reply = "这个问题我还没有学会,请问我其它问题吧"
                return Reply(ReplyType.TEXT, error_reply)

        except Exception as e:
            logger.exception(e)
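
Note that the diff introduces two distinct failure signals: a body-level `code` of 429 inside an HTTP 200 response (per-user rate limit, reply returned but not persisted to the session) and an HTTP 409 status (knowledge base has no answer). A small self-contained sketch of that decision logic, with an illustrative function name that does not exist in the code:

```python
# Sketch only; handle_linkai_result is a made-up name that condenses the two new branches above.
def handle_linkai_result(status_code: int, body: dict) -> str:
    if status_code == 200:
        reply = body["choices"][0]["message"]["content"]
        if body.get("code") == 429:
            return reply   # rate-limited user: caller skips sessions.session_reply(...)
        return reply       # normal path: caller also writes the turn into the session
    if status_code == 409:
        return "这个问题我还没有学会,请问我其它问题吧"
    return "提问太快啦,请休息一下再问我吧"
```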
@@ -47,7 +47,8 @@ class XunFeiBot(Bot):
        # default v2.0 endpoint: "ws://spark-api.xf-yun.com/v2.1/chat"
        # v1.5 endpoint: "ws://spark-api.xf-yun.com/v1.1/chat"
        # v3.0 endpoint: "ws://spark-api.xf-yun.com/v3.1/chat"
        self.spark_url = "ws://spark-api.xf-yun.com/v3.1/chat"
        # v3.5 endpoint: "wss://spark-api.xf-yun.com/v3.5/chat"
        self.spark_url = "wss://spark-api.xf-yun.com/v3.5/chat"
        self.host = urlparse(self.spark_url).netloc
        self.path = urlparse(self.spark_url).path
        # use the same session mechanism as wenxin
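
The default endpoint now tracks Spark v3.5; note the scheme change to `wss`. For orientation, the versions mentioned in the comments collected into one place (the dict itself is illustrative and does not exist in the code):

```python
# Illustrative mapping of Spark versions to their websocket chat endpoints,
# taken from the comments above.
SPARK_CHAT_URLS = {
    "v1.5": "ws://spark-api.xf-yun.com/v1.1/chat",
    "v2.0": "ws://spark-api.xf-yun.com/v2.1/chat",
    "v3.0": "ws://spark-api.xf-yun.com/v3.1/chat",
    "v3.5": "wss://spark-api.xf-yun.com/v3.5/chat",   # new default in this diff
}
```

If this bot also sends a version-specific `domain` field in its request payload, that value may need to change in step with the URL; that code is outside this hunk.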
@@ -18,6 +18,7 @@ class Bridge(object):
            "text_to_voice": conf().get("text_to_voice", "google"),
            "translate": conf().get("translate", "baidu"),
        }
        # read the model type from config
        model_type = conf().get("model") or const.GPT35
        if model_type in ["text-davinci-003"]:
            self.btype["chat"] = const.OPEN_AI
@@ -33,6 +34,8 @@ class Bridge(object):
            self.btype["chat"] = const.GEMINI
        if model_type in [const.ZHIPU_AI]:
            self.btype["chat"] = const.ZHIPU_AI
        if model_type and model_type.startswith("claude-3"):
            self.btype["chat"] = const.CLAUDEAPI
        if conf().get("use_linkai") and conf().get("linkai_api_key"):
            self.btype["chat"] = const.LINKAI
@@ -40,12 +43,12 @@ class Bridge(object):
                self.btype["voice_to_text"] = const.LINKAI
            if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
                self.btype["text_to_voice"] = const.LINKAI

        if model_type in ["claude"]:
            self.btype["chat"] = const.CLAUDEAI
        self.bots = {}
        self.chat_bots = {}

    # the bot implementation used for each capability type
    def get_bot(self, typename):
        if self.bots.get(typename) is None:
            logger.info("create bot {} for {}".format(self.btype[typename], typename))
@@ -11,7 +11,7 @@ class ReplyType(Enum):
    VIDEO_URL = 5  # video URL
    FILE = 6  # file
    CARD = 7  # WeChat contact card, ntchat only
    InviteRoom = 8  # invite a friend into a group chat
    INVITE_ROOM = 8  # invite a friend into a group chat
    INFO = 9
    ERROR = 10
    TEXT_ = 11  # force text
@@ -170,11 +170,13 @@ class ChatChannel(Channel):
            reply = self._generate_reply(context)
        logger.debug("[WX] ready to decorate reply: {}".format(reply))
        # decorate the reply
        reply = self._decorate_reply(context, reply)
        # send the reply
        self._send_reply(context, reply)
        if reply and reply.content:
            reply = self._decorate_reply(context, reply)
            # send the reply
            self._send_reply(context, reply)

    def _generate_reply(self, context: Context, reply: Reply = Reply()) -> Reply:
        e_context = PluginManager().emit_event(
@@ -233,7 +233,6 @@ class WechatChannel(ChatChannel):
            logger.info("[WX] sendImage url={}, receiver={}".format(img_url, receiver))
        elif reply.type == ReplyType.IMAGE:  # read the image from a file object
            image_storage = reply.content
            image_storage.seek(0)
            itchat.send_image(image_storage, toUserName=receiver)
            logger.info("[WX] sendImage, receiver={}".format(receiver))
        elif reply.type == ReplyType.FILE:  # new file reply type
@@ -6,12 +6,15 @@ XUNFEI = "xunfei"
CHATGPTONAZURE = "chatGPTOnAzure"
LINKAI = "linkai"
CLAUDEAI = "claude"
CLAUDEAPI = "claudeAPI"
QWEN = "qwen"
GEMINI = "gemini"
ZHIPU_AI = "glm-4"
MOONSHOT = "moonshot"

# model
CLAUDE3 = "claude-3-opus-20240229"
GPT35 = "gpt-3.5-turbo"
GPT4 = "gpt-4"
GPT4_TURBO_PREVIEW = "gpt-4-0125-preview"
@@ -20,8 +23,8 @@ WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"

MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "gpt-4-turbo",
              "gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, QWEN, GEMINI, ZHIPU_AI]
MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "claude-3-opus-20240229", "gpt-4-turbo",
              "gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, QWEN, GEMINI, ZHIPU_AI, MOONSHOT]

# channel
FEISHU = "feishu"
@@ -2,6 +2,7 @@
  "channel_type": "wx",
  "model": "",
  "open_ai_api_key": "YOUR API KEY",
  "claude_api_key": "YOUR API KEY",
  "text_to_image": "dall-e-2",
  "voice_to_text": "openai",
  "text_to_voice": "openai",
@@ -74,6 +74,8 @@ available_setting = {
    # claude settings
    "claude_api_cookie": "",
    "claude_uuid": "",
    # claude official API key
    "claude_api_key": "",
    # 通义千问 API; see https://help.aliyun.com/document_detail/2587494.html for how to obtain the keys
    "qwen_access_key_id": "",
    "qwen_access_key_secret": "",
@@ -90,7 +92,7 @@ available_setting = {
    "voice_reply_voice": False,  # whether to answer voice messages with voice; requires the API key of the chosen TTS engine
    "always_reply_voice": False,  # always reply with voice
    "voice_to_text": "openai",  # speech recognition engine: openai, baidu, google, azure
    "text_to_voice": "openai",  # text-to-speech engine: openai, baidu, google, pytts(offline), azure, elevenlabs
    "text_to_voice": "openai",  # text-to-speech engine: openai, baidu, google, pytts(offline), azure, elevenlabs, edge(online)
    "text_to_voice_model": "tts-1",
    "tts_voice_id": "alloy",
    # baidu voice API settings, required when using Baidu speech recognition / synthesis
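
Taken together with the new `voice.factory` branch later in this diff, enabling the new edge-tts engine is a config-only change. A minimal sketch in the same style as the config.json example earlier; the key names come from this diff, the values are illustrative:

```
# config.json fragment: answer voice messages with edge-tts audio
{
  "speech_recognition": true,   # parse incoming voice messages
  "voice_reply_voice": true,    # answer voice with voice
  "text_to_voice": "edge"       # selects EdgeVoice in the voice factory
}
```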
@@ -0,0 +1,13 @@
## Archived changelog

2023.04.26: WeCom application deployment supported, compatible with plugins and with voice/image interaction, an ideal choice for a personal assistant; see the usage docs. (contributed by @lanvent in #944)

2023.04.05: WeChat Official Account deployment supported, compatible with plugins and with voice/image interaction; see the usage docs. (contributed by @JS00000 in #686)

2023.04.05: Added the tool plugin that lets ChatGPT use tools; see the usage docs. Tool-related issues can be reported to chatgpt-tool-hub. (contributed by @goldfishh in #663)

2023.03.25: Plugin-based development supported; plugins such as multi-persona switching, a text adventure game, admin commands and Stable Diffusion are available, see #578. (contributed by @lanvent in #565)

2023.03.09: WeChat voice messages are parsed and answered based on the whisper API (more voice API services were connected later); enable it with the config item "speech_recognition":true, see #415. (contributed by wanggang1987 in #385)

2023.02.09: QR-code login carries a risk of account restriction, use with caution, see #158
@@ -313,7 +313,7 @@ class Godcmd(Plugin):
                    except Exception as e:
                        ok, result = False, "你没有设置私有GPT模型"
                elif cmd == "reset":
                    if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI]:
                    if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI]:
                        bot.sessions.clear_session(session_id)
                        if Bridge().chat_bots.get(bottype):
                            Bridge().chat_bots.get(bottype).sessions.clear_session(session_id)
@@ -339,7 +339,7 @@ class Godcmd(Plugin):
                        ok, result = True, "配置已重载"
                    elif cmd == "resetall":
                        if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI,
                                       const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI]:
                                       const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT]:
                            channel.cancel_all_session()
                            bot.sessions.clear_all_session()
                            ok, result = True, "重置所有会话成功"
@@ -99,7 +99,7 @@ class PluginManager:
        try:
            self.current_plugin_path = plugin_path
            if plugin_path in self.loaded:
                if self.loaded[plugin_path] == None:
                if plugin_name.upper() != 'GODCMD':
                    logger.info("reload module %s" % plugin_name)
                    self.loaded[plugin_path] = importlib.reload(sys.modules[import_path])
                    dependent_module_names = [name for name in sys.modules.keys() if name.startswith(import_path + ".")]
@@ -141,19 +141,21 @@ class PluginManager:
        failed_plugins = []
        for name, plugincls in self.plugins.items():
            if plugincls.enabled:
                if name not in self.instances:
                    try:
                        instance = plugincls()
                    except Exception as e:
                        logger.warn("Failed to init %s, disabled. %s" % (name, e))
                        self.disable_plugin(name)
                        failed_plugins.append(name)
                        continue
                    self.instances[name] = instance
                    for event in instance.handlers:
                        if event not in self.listening_plugins:
                            self.listening_plugins[event] = []
                        self.listening_plugins[event].append(name)
                if 'GODCMD' in self.instances and name == 'GODCMD':
                    continue
                # if name not in self.instances:
                try:
                    instance = plugincls()
                except Exception as e:
                    logger.warn("Failed to init %s, disabled. %s" % (name, e))
                    self.disable_plugin(name)
                    failed_plugins.append(name)
                    continue
                self.instances[name] = instance
                for event in instance.handlers:
                    if event not in self.listening_plugins:
                        self.listening_plugins[event] = []
                    self.listening_plugins[event].append(name)
        self.refresh_order()
        return failed_plugins
@@ -19,6 +19,14 @@
    "Apilot": {
      "url": "https://github.com/6vision/Apilot.git",
      "desc": "通过api直接查询早报、热榜、快递、天气等实用信息的插件"
    },
    "pictureChange": {
      "url": "https://github.com/Yanyutin753/pictureChange.git",
      "desc": "利用stable-diffusion和百度Ai进行图生图或者画图的插件"
    },
    "Blackroom": {
      "url": "https://github.com/dividduang/blackroom.git",
      "desc": "小黑屋插件,被拉进小黑屋的人将不能使用@bot的功能的插件"
    }
  }
}
@@ -7,6 +7,7 @@ gTTS>=2.3.1 # google text to speech
pyttsx3>=2.90 # pytsx text to speech
baidu_aip>=4.16.10 # baidu voice
azure-cognitiveservices-speech # azure voice
edge-tts # edge-tts
numpy<=1.24.2
langid # language detect
@@ -25,6 +26,8 @@ websocket-client==1.2.0
# claude bot
curl_cffi

# claude API
anthropic

# tongyi qwen
broadscope_bailian
@@ -6,4 +6,4 @@ requests>=2.28.2
chardet>=5.1.0
Pillow
pre-commit
web.py
@@ -64,7 +64,9 @@ def any_to_wav(any_path, wav_path):
    if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
        return sil_to_wav(any_path, wav_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(wav_path, format="wav")
    # Baidu speech recognition expects 8000 Hz, pcm_s16le, mono audio.
    # pydub AudioSegments are immutable, so the converted segments must be reassigned.
    audio = audio.set_frame_rate(8000)
    audio = audio.set_channels(1)
    audio.export(wav_path, format="wav", codec='pcm_s16le')


def any_to_sil(any_path, sil_path):
@@ -62,7 +62,7 @@ class BaiduVoice(Voice):
        # recognize a local file
        logger.debug("[Baidu] voice file name={}".format(voice_file))
        pcm = get_pcm_from_wav(voice_file)
        res = self.client.asr(pcm, "pcm", 16000, {"dev_pid": self.dev_id})
        res = self.client.asr(pcm, "pcm", 8000, {"dev_pid": self.dev_id})
        if res["err_no"] == 0:
            logger.info("百度语音识别到了:{}".format(res["result"]))
            text = "".join(res["result"])
@@ -0,0 +1,50 @@
import time
import edge_tts
import asyncio

from bridge.reply import Reply, ReplyType
from common.log import logger
from common.tmp_dir import TmpDir
from voice.voice import Voice


class EdgeVoice(Voice):
    def __init__(self):
        '''
        # Mandarin
        zh-CN-XiaoxiaoNeural
        zh-CN-XiaoyiNeural
        zh-CN-YunjianNeural
        zh-CN-YunxiNeural
        zh-CN-YunxiaNeural
        zh-CN-YunyangNeural
        # Regional accents
        zh-CN-liaoning-XiaobeiNeural
        zh-CN-shaanxi-XiaoniNeural
        # Cantonese
        zh-HK-HiuGaaiNeural
        zh-HK-HiuMaanNeural
        zh-HK-WanLungNeural
        # Taiwanese Mandarin
        zh-TW-HsiaoChenNeural
        zh-TW-HsiaoYuNeural
        zh-TW-YunJheNeural
        '''
        self.voice = "zh-CN-YunjianNeural"

    def voiceToText(self, voice_file):
        pass

    async def gen_voice(self, text, fileName):
        communicate = edge_tts.Communicate(text, self.voice)
        await communicate.save(fileName)

    def textToVoice(self, text):
        fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
        asyncio.run(self.gen_voice(text, fileName))
        logger.info("[EdgeTTS] textToVoice text={} voice file name={}".format(text, fileName))
        return Reply(ReplyType.VOICE, fileName)
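
A quick hypothetical check of the new class in isolation; it only needs the `edge-tts` package added to requirements-optional.txt and network access, since Edge TTS is an online service:

```python
# Hypothetical standalone use of EdgeVoice (not part of the diff).
from voice.edge.edge_voice import EdgeVoice

voice = EdgeVoice()
reply = voice.textToVoice("新版本已经支持 edge-tts 语音合成")
print(reply.type, reply.content)   # ReplyType.VOICE and the path of the generated .mp3
```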
@@ -42,4 +42,8 @@ def create_voice(voice_type):
        from voice.ali.ali_voice import AliVoice
        return AliVoice()
    elif voice_type == "edge":
        from voice.edge.edge_voice import EdgeVoice
        return EdgeVoice()
    raise RuntimeError