diff --git a/README.md b/README.md index 5a95a57..e018d41 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ 最新版本支持的功能如下: - [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信,微信公众号和企业微信应用等部署方式 -- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3, GPT-3.5, GPT-4, claude, 文心一言, 讯飞星火 +- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4, claude, 文心一言, 讯飞星火 - [x] **语音识别:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai等多种语音模型 - [x] **图片生成:** 支持图片生成 和 图生图(如照片修复),可选择 Dell-E, stable diffusion, replicate, midjourney模型 - [x] **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结等插件 @@ -27,7 +27,7 @@ Demo made by [Visionn](https://www.wangpc.cc/) # 更新日志 ->**2023.09.01:** 接入讯飞星火,claude机器人 +>**2023.09.01:** 增加 [企微个人号](#1385) 通道,[claude](1388),讯飞星火模型 >**2023.08.08:** 接入百度文心一言模型,通过 [插件](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/linkai) 支持 Midjourney 绘图 @@ -176,26 +176,6 @@ pip3 install azure-cognitiveservices-speech + `linkai_api_key`: LinkAI Api Key,可在 [控制台](https://chat.link-ai.tech/console/interface) 创建 + `linkai_app_code`: LinkAI 应用code,选填 -**6.wenxin配置 (可选 model 为 wenxin 时生效)** - -+ `baidu_wenxin_api_key`: 文心一言官网api key。 -+ `baidu_wenxin_secret_key`: 文心一言官网secret key。 - - -**6.Claude配置 (可选 model 为 claude 时生效)** - -+ `claude_api_cookie`: claude官网聊天界面复制完整 cookie 字符串。 -+ `claude_uuid`: 可以指定对话id,默认新建对话实体。 - - -**7.xunfei配置 (可选 model 为 xunfei 时生效)** - -+ `xunfei_app_id`: 讯飞星火app id。 -+ `xunfei_api_key`: 讯飞星火 api key。 -+ `xunfei_api_secret`: 讯飞星火 secret。 - - - **本说明文档可能会未及时更新,当前所有可选的配置项均在该[`config.py`](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py)中列出。** ## 运行 diff --git a/bot/claude/claude_ai_bot.py b/bot/claude/claude_ai_bot.py index d273f2c..faad274 100644 --- a/bot/claude/claude_ai_bot.py +++ b/bot/claude/claude_ai_bot.py @@ -27,6 +27,7 @@ class ClaudeAIBot(Bot, OpenAIImage): } else: self.proxies = None + self.error = "" self.org_uuid = self.get_organization_id() def generate_uuid(self): @@ -34,32 +35,6 @@ class ClaudeAIBot(Bot, OpenAIImage): random_uuid_str = str(random_uuid) formatted_uuid = f"{random_uuid_str[0:8]}-{random_uuid_str[9:13]}-{random_uuid_str[14:18]}-{random_uuid_str[19:23]}-{random_uuid_str[24:]}" return formatted_uuid - - def get_uuid(self): - if conf().get("claude_uuid") != None: - self.con_uuid = conf().get("claude_uuid") - else: - con_uuid = self.generate_uuid() - self.create_new_chat(con_uuid) - - def get_organization_id(self): - url = "https://claude.ai/api/organizations" - headers = { - 'User-Agent': - 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', - 'Accept-Language': 'en-US,en;q=0.5', - 'Referer': 'https://claude.ai/chats', - 'Content-Type': 'application/json', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - 'Connection': 'keep-alive', - 'Cookie': f'{self.claude_api_cookie}' - } - response = requests.get(url, headers=headers,impersonate="chrome110",proxies=self.proxies) - res = json.loads(response.text) - uuid = res[0]['uuid'] - return uuid def reply(self, query, context: Context = None) -> Reply: if context.type == ContextType.TEXT: @@ -90,20 +65,38 @@ class ClaudeAIBot(Bot, OpenAIImage): 'Cookie': f'{self.claude_api_cookie}' } try: - response = requests.get(url, headers=headers,impersonate="chrome110",proxies =self.proxies ) + response = requests.get(url, headers=headers, impersonate="chrome110", proxies =self.proxies, timeout=400) res = json.loads(response.text) uuid = res[0]['uuid'] except: - print(response.text) + if "App unavailable" in 
response.text: + logger.error("IP error: The IP is not allowed to be used on Claude") + self.error = "ip所在地区不被claude支持" + elif "Invalid authorization" in response.text: + logger.error("Cookie error: Invalid authorization of claude, check cookie please.") + self.error = "无法通过claude身份验证,请检查cookie" + return None return uuid def conversation_share_check(self,session_id): + if conf().get("claude_uuid") is not None and conf().get("claude_uuid") != "": + con_uuid = conf().get("claude_uuid") + return con_uuid if session_id not in self.con_uuid_dic: self.con_uuid_dic[session_id] = self.generate_uuid() self.create_new_chat(self.con_uuid_dic[session_id]) return self.con_uuid_dic[session_id] + def check_cookie(self): + flag = self.get_organization_id() + return flag + def create_new_chat(self, con_uuid): + """ + 新建claude对话实体 + :param con_uuid: 对话id + :return: + """ url = f"https://claude.ai/api/organizations/{self.org_uuid}/chat_conversations" payload = json.dumps({"uuid": con_uuid, "name": ""}) headers = { @@ -121,7 +114,7 @@ class ClaudeAIBot(Bot, OpenAIImage): 'Sec-Fetch-Site': 'same-origin', 'TE': 'trailers' } - response = requests.post(url, headers=headers, data=payload,impersonate="chrome110", proxies= self.proxies) + response = requests.post(url, headers=headers, data=payload, impersonate="chrome110", proxies=self.proxies, timeout=400) # Returns JSON of the newly created conversation information return response.json() @@ -140,8 +133,12 @@ class ClaudeAIBot(Bot, OpenAIImage): try: session_id = context["session_id"] + if self.org_uuid is None: + return Reply(ReplyType.ERROR, self.error) + session = self.sessions.session_query(query, session_id) con_uuid = self.conversation_share_check(session_id) + model = conf().get("model") or "gpt-3.5-turbo" # remove system message if session.messages[0].get("role") == "system": @@ -193,11 +190,18 @@ class ClaudeAIBot(Bot, OpenAIImage): completions.append(data['completion']) reply_content = ''.join(completions) - logger.info(f"[CLAUDE] reply={reply_content}, total_tokens=invisible") + if "rate limi" in reply_content: + logger.error("rate limit error: The conversation has reached the system speed limit and is synchronized with Cladue. 
Please go to the official website to check the lifting time") + return Reply(ReplyType.ERROR, "对话达到系统速率限制,与cladue同步,请进入官网查看解除限制时间") + logger.info(f"[CLAUDE] reply={reply_content}, total_tokens=invisible") self.sessions.session_reply(reply_content, session_id, 100) return Reply(ReplyType.TEXT, reply_content) else: + flag = self.check_cookie() + if flag == None: + return Reply(ReplyType.ERROR, self.error) + response = res.json() error = response.get("error") logger.error(f"[CLAUDE] chat failed, status_code={res.status_code}, " diff --git a/bot/claude/claude_ai_session b/bot/claude/claude_ai_session.py similarity index 100% rename from bot/claude/claude_ai_session rename to bot/claude/claude_ai_session.py diff --git a/bot/linkai/link_ai_bot.py b/bot/linkai/link_ai_bot.py index c5aed0f..97ac2e3 100644 --- a/bot/linkai/link_ai_bot.py +++ b/bot/linkai/link_ai_bot.py @@ -23,6 +23,7 @@ class LinkAIBot(Bot, OpenAIImage): def __init__(self): super().__init__() self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo") + self.args = {} def reply(self, query, context: Context = None) -> Reply: if context.type == ContextType.TEXT: @@ -72,7 +73,7 @@ class LinkAIBot(Bot, OpenAIImage): body = { "app_code": app_code, "messages": session.messages, - "model": model, # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin + "model": model, # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei "temperature": conf().get("temperature"), "top_p": conf().get("top_p", 1), "frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 @@ -114,3 +115,68 @@ class LinkAIBot(Bot, OpenAIImage): time.sleep(2) logger.warn(f"[LINKAI] do retry, times={retry_count}") return self._chat(query, context, retry_count + 1) + + def reply_text(self, session: ChatGPTSession, app_code="", retry_count=0) -> dict: + if retry_count >= 2: + # exit from retry 2 times + logger.warn("[LINKAI] failed after maximum number of retry times") + return { + "total_tokens": 0, + "completion_tokens": 0, + "content": "请再问我一次吧" + } + + try: + body = { + "app_code": app_code, + "messages": session.messages, + "model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei + "temperature": conf().get("temperature"), + "top_p": conf().get("top_p", 1), + "frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 + "presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 + } + if self.args.get("max_tokens"): + body["max_tokens"] = self.args.get("max_tokens") + headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")} + + # do http request + base_url = conf().get("linkai_api_base", "https://api.link-ai.chat") + res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers, + timeout=conf().get("request_timeout", 180)) + if res.status_code == 200: + # execute success + response = res.json() + reply_content = response["choices"][0]["message"]["content"] + total_tokens = response["usage"]["total_tokens"] + logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}") + return { + "total_tokens": total_tokens, + "completion_tokens": response["usage"]["completion_tokens"], + "content": reply_content, + } + + else: + response = res.json() + error = response.get("error") + logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, " + f"msg={error.get('message')}, type={error.get('type')}") + + if res.status_code >= 500: 
+ # server error, need retry + time.sleep(2) + logger.warn(f"[LINKAI] do retry, times={retry_count}") + return self.reply_text(session, app_code, retry_count + 1) + + return { + "total_tokens": 0, + "completion_tokens": 0, + "content": "提问太快啦,请休息一下再问我吧" + } + + except Exception as e: + logger.exception(e) + # retry + time.sleep(2) + logger.warn(f"[LINKAI] do retry, times={retry_count}") + return self.reply_text(session, app_code, retry_count + 1) diff --git a/channel/wechatmp/wechatmp_channel.py b/channel/wechatmp/wechatmp_channel.py index 0c54a1d..6f21ce1 100644 --- a/channel/wechatmp/wechatmp_channel.py +++ b/channel/wechatmp/wechatmp_channel.py @@ -20,7 +20,7 @@ from common.log import logger from common.singleton import singleton from common.utils import split_string_by_utf8_length from config import conf -from voice.audio_convert import any_to_mp3 +from voice.audio_convert import any_to_mp3, split_audio # If using SSL, uncomment the following lines, and modify the certificate path. # from cheroot.server import HTTPServer @@ -162,13 +162,28 @@ class WechatMPChannel(ChatChannel): file_name = os.path.basename(file_path) file_type = "audio/mpeg" logger.info("[wechatmp] file_name: {}, file_type: {} ".format(file_name, file_type)) - # support: <2M, <60s, AMR\MP3 - response = self.client.media.upload("voice", (file_name, open(file_path, "rb"), file_type)) - logger.debug("[wechatmp] upload voice response: {}".format(response)) + media_ids = [] + duration, files = split_audio(file_path, 60 * 1000) + if len(files) > 1: + logger.info("[wechatmp] voice too long {}s > 60s , split into {} parts".format(duration / 1000.0, len(files))) + for path in files: + # support: <2M, <60s, AMR\MP3 + response = self.client.media.upload("voice", (os.path.basename(path), open(path, "rb"), file_type)) + logger.debug("[wechatcom] upload voice response: {}".format(response)) + media_ids.append(response["media_id"]) + os.remove(path) except WeChatClientException as e: logger.error("[wechatmp] upload voice failed: {}".format(e)) return - self.client.message.send_voice(receiver, response["media_id"]) + + try: + os.remove(file_path) + except Exception: + pass + + for media_id in media_ids: + self.client.message.send_voice(receiver, media_id) + time.sleep(1) logger.info("[wechatmp] Do send voice to {}".format(receiver)) elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片 img_url = reply.content diff --git a/channel/wework/wework_channel.py b/channel/wework/wework_channel.py index 722cc3e..fb77843 100644 --- a/channel/wework/wework_channel.py +++ b/channel/wework/wework_channel.py @@ -16,6 +16,7 @@ from channel.wework.wework_message import WeworkMessage from common.singleton import singleton from common.log import logger from common.time_check import time_checker +from common.utils import compress_imgfile, fsize from config import conf from channel.wework.run import wework from channel.wework import run @@ -38,12 +39,25 @@ def download_and_compress_image(url, filename, quality=30): os.makedirs(directory) # 下载图片 - response = requests.get(url) - image = Image.open(io.BytesIO(response.content)) - - # 压缩图片 - image_path = os.path.join(directory, f"{filename}.jpg") - image.save(image_path, "JPEG", quality=quality) + pic_res = requests.get(url, stream=True) + image_storage = io.BytesIO() + for block in pic_res.iter_content(1024): + image_storage.write(block) + + # 检查图片大小并可能进行压缩 + sz = fsize(image_storage) + if sz >= 10 * 1024 * 1024: # 如果图片大于 10 MB + logger.info("[wework] image too large, ready to compress, sz={}".format(sz)) + 
image_storage = compress_imgfile(image_storage, 10 * 1024 * 1024 - 1) + logger.info("[wework] image compressed, sz={}".format(fsize(image_storage))) + + # 将内存缓冲区的指针重置到起始位置 + image_storage.seek(0) + + # 读取并保存图片 + image = Image.open(image_storage) + image_path = os.path.join(directory, f"{filename}.png") + image.save(image_path, "png") return image_path @@ -213,6 +227,9 @@ class WeworkChannel(ChatChannel): @time_checker @_check def handle_single(self, cmsg: ChatMessage): + if cmsg.from_user_id == cmsg.to_user_id: + # ignore self reply + return if cmsg.ctype == ContextType.VOICE: if not conf().get("speech_recognition"): return diff --git a/plugins/linkai/README.md b/plugins/linkai/README.md index 5380c43..460224e 100644 --- a/plugins/linkai/README.md +++ b/plugins/linkai/README.md @@ -4,15 +4,15 @@ ## 插件配置 -将 `plugins/linkai` 目录下的 `config.json.template` 配置模板复制为最终生效的 `config.json`: +将 `plugins/linkai` 目录下的 `config.json.template` 配置模板复制为最终生效的 `config.json`。 (如果未配置则会默认使用`config.json.template`模板中配置,功能默认关闭,可通过指令进行开启)。 以下是配置项说明: ```bash { "group_app_map": { # 群聊 和 应用编码 的映射关系 - "测试群1": "default", # 表示在名称为 "测试群1" 的群聊中将使用app_code 为 default 的应用 - "测试群2": "Kv2fXJcH" + "测试群名称1": "default", # 表示在名称为 "测试群名称1" 的群聊中将使用app_code 为 default 的应用 + "测试群名称2": "Kv2fXJcH" }, "midjourney": { "enabled": true, # midjourney 绘画开关 @@ -51,6 +51,8 @@ ### 2.Midjourney绘画功能 +若未配置 `plugins/linkai/config.json`,默认会关闭画图功能,直接使用 `$mj open` 可基于默认配置直接使用mj画图。 + 指令格式: ``` @@ -69,7 +71,9 @@ "$mjr 11055927171882" ``` -注: -1. 开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。 -2. 提示词内容中包含敏感词或者参数格式错误可能导致绘画失败,生成失败不消耗积分 -3. 使用 `$mj open` 和 `$mj close` 指令可以快速打开和关闭绘图功能 +注意事项: +1. 使用 `$mj open` 和 `$mj close` 指令可以快速打开和关闭绘图功能 +2. 海外环境部署请将 `img_proxy` 设置为 `False` +3. 开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。 +4. 提示词内容中包含敏感词或者参数格式错误可能导致绘画失败,生成失败不消耗积分 +5. 
若未收到图片可能有两种可能,一种是收到了图片但微信发送失败,可以在后台日志查看有没有获取到图片url,一般原因是受到了wx限制,可以稍后重试或更换账号尝试;另一种情况是图片提示词存在疑似违规,mj不会直接提示错误但会在画图后删掉原图导致程序无法获取,这种情况不消耗积分。 diff --git a/plugins/linkai/config.json.template b/plugins/linkai/config.json.template index 8e6b22c..3df5f3f 100644 --- a/plugins/linkai/config.json.template +++ b/plugins/linkai/config.json.template @@ -1,7 +1,7 @@ { "group_app_map": { - "测试群1": "default", - "测试群2": "Kv2fXJcH" + "测试群名1": "default", + "测试群名2": "Kv2fXJcH" }, "midjourney": { "enabled": true, diff --git a/plugins/linkai/linkai.py b/plugins/linkai/linkai.py index 727f02c..c50b814 100644 --- a/plugins/linkai/linkai.py +++ b/plugins/linkai/linkai.py @@ -18,6 +18,9 @@ class LinkAI(Plugin): super().__init__() self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context self.config = super().load_config() + if not self.config: + # 未加载到配置,使用模板中的配置 + self.config = self._load_config_template() if self.config: self.mj_bot = MJBot(self.config.get("midjourney")) logger.info("[LinkAI] inited") @@ -70,7 +73,7 @@ class LinkAI(Plugin): is_open = False conf()["use_linkai"] = is_open bridge.Bridge().reset_bot() - _set_reply_text(f"知识库功能已{tips_text}", e_context, level=ReplyType.INFO) + _set_reply_text(f"LinkAI对话功能{tips_text}", e_context, level=ReplyType.INFO) return if len(cmd) == 3 and cmd[1] == "app": @@ -139,6 +142,17 @@ class LinkAI(Plugin): help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\"" return help_text + def _load_config_template(self): + logger.debug("No LinkAI plugin config.json, use plugins/linkai/config.json.template") + try: + plugin_config_path = os.path.join(self.path, "config.json.template") + if os.path.exists(plugin_config_path): + with open(plugin_config_path, "r", encoding="utf-8") as f: + plugin_conf = json.load(f) + plugin_conf["midjourney"]["enabled"] = False + return plugin_conf + except Exception as e: + logger.exception(e) # 静态方法 def _is_admin(e_context: EventContext) -> bool: diff --git a/plugins/linkai/midjourney.py b/plugins/linkai/midjourney.py index d7d0634..3f5ca99 100644 --- a/plugins/linkai/midjourney.py +++ b/plugins/linkai/midjourney.py @@ -96,7 +96,7 @@ class MJBot: return TaskType.VARIATION elif cmd_list[0].lower() == f"{trigger_prefix}mjr": return TaskType.RESET - elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix"): + elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix") and self.config.get("enabled"): return TaskType.GENERATE def process_mj_task(self, mj_type: TaskType, e_context: EventContext): diff --git a/plugins/plugin.py b/plugins/plugin.py index 2e3e465..9e2a92e 100644 --- a/plugins/plugin.py +++ b/plugins/plugin.py @@ -15,8 +15,8 @@ class Plugin: """ # 优先获取 plugins/config.json 中的全局配置 plugin_conf = pconf(self.name) - if not plugin_conf or not conf().get("use_global_plugin_config"): - # 全局配置不存在 或者 未开启全局配置开关,则获取插件目录下的配置 + if not plugin_conf: + # 全局配置不存在,则获取插件目录下的配置 plugin_config_path = os.path.join(self.path, "config.json") if os.path.exists(plugin_config_path): with open(plugin_config_path, "r", encoding="utf-8") as f: diff --git a/requirements-optional.txt b/requirements-optional.txt index 17c4c1f..5633274 100644 --- a/requirements-optional.txt +++ b/requirements-optional.txt @@ -23,7 +23,6 @@ web.py wechatpy # chatgpt-tool-hub plugin ---extra-index-url https://pypi.python.org/simple chatgpt_tool_hub==0.4.6 # xunfei spark
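
Note for reviewers: the wechatmp change above imports `split_audio` from `voice/audio_convert.py`, but the helper itself is not part of this diff. The sketch below only illustrates the behavior inferred from the call site `duration, files = split_audio(file_path, 60 * 1000)` (total duration in milliseconds plus a list of segment paths, each at most 60 s); it assumes pydub and is not the repository's actual implementation. Splitting is needed because, as the inline comment notes, official-account voice uploads are limited to <2 MB and <60 s.

```python
# Hypothetical sketch of the split_audio helper assumed by the wechatmp change.
# Inferred from the call site only; the real code in voice/audio_convert.py may differ.
import os

from pydub import AudioSegment


def split_audio(file_path, max_segment_length_ms=60_000):
    """Split an audio file into segments no longer than max_segment_length_ms.

    Returns (total_duration_ms, [segment_path, ...]). If the audio already fits
    in one segment, the original path is returned unchanged.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)  # pydub reports length in milliseconds
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]

    base, ext = os.path.splitext(file_path)
    fmt = ext.lstrip(".") or "mp3"
    paths = []
    for i, start in enumerate(range(0, total_ms, max_segment_length_ms)):
        segment = audio[start:start + max_segment_length_ms]
        segment_path = f"{base}_{i}{ext}"
        segment.export(segment_path, format=fmt)
        paths.append(segment_path)
    return total_ms, paths
```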
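
Similarly, the wework change relies on `fsize` and `compress_imgfile` from `common/utils.py`, which are also outside this diff. A minimal sketch under the same caveat — the names and the 10 MB threshold come from the call site; everything else is an assumption, not the project's actual helpers:

```python
# Hypothetical sketch of fsize / compress_imgfile as used in wework_channel.py.
# The actual helpers live in common/utils.py and may be implemented differently.
import io

from PIL import Image


def fsize(file_like: io.BytesIO) -> int:
    """Return the size in bytes of an in-memory file."""
    return file_like.getbuffer().nbytes


def compress_imgfile(file_like: io.BytesIO, max_size: int) -> io.BytesIO:
    """Re-encode an in-memory image as JPEG, lowering quality until it fits max_size."""
    file_like.seek(0)
    img = Image.open(file_like).convert("RGB")
    quality = 95
    out = io.BytesIO()
    while quality > 10:
        out = io.BytesIO()
        img.save(out, format="JPEG", quality=quality)
        if out.getbuffer().nbytes <= max_size:
            break
        quality -= 5
    out.seek(0)
    return out
```

The compressed buffer is then re-opened with `Image.open(image_storage)` and saved to disk, which matches how the new `download_and_compress_image` consumes it.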