diff --git a/bot/chatgpt/chat_gpt_bot.py b/bot/chatgpt/chat_gpt_bot.py
index 22b13df..b704998 100644
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -8,9 +8,9 @@ import openai
 import time
 
 if conf().get('expires_in_seconds'):
-    user_session = ExpiredDict(conf().get('expires_in_seconds'))
+    all_sessions = ExpiredDict(conf().get('expires_in_seconds'))
 else:
-    user_session = dict()
+    all_sessions = dict()
 
 # OpenAI对话模型API (可用)
 class ChatGPTBot(Bot):
@@ -24,49 +24,49 @@ class ChatGPTBot(Bot):
         # acquire reply content
         if not context or not context.get('type') or context.get('type') == 'TEXT':
             logger.info("[OPEN_AI] query={}".format(query))
-            from_user_id = context['from_user_id']
+            session_id = context['session_id']
             if query == '#清除记忆':
-                Session.clear_session(from_user_id)
+                Session.clear_session(session_id)
                 return '记忆已清除'
             elif query == '#清除所有':
                 Session.clear_all_session()
                 return '所有人记忆已清除'
 
-            new_query = Session.build_session_query(query, from_user_id)
-            logger.debug("[OPEN_AI] session query={}".format(new_query))
+            session = Session.build_session_query(query, session_id)
+            logger.debug("[OPEN_AI] session query={}".format(session))
 
             # if context.get('stream'):
             #     # reply in stream
-            #     return self.reply_text_stream(query, new_query, from_user_id)
+            #     return self.reply_text_stream(query, new_query, session_id)
 
-            reply_content = self.reply_text(new_query, from_user_id, 0)
-            logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content["content"]))
+            reply_content = self.reply_text(session, session_id, 0)
+            logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}".format(session, session_id, reply_content["content"]))
             if reply_content["completion_tokens"] > 0:
-                Session.save_session(reply_content["content"], from_user_id, reply_content["total_tokens"])
+                Session.save_session(reply_content["content"], session_id, reply_content["total_tokens"])
             return reply_content["content"]
 
         elif context.get('type', None) == 'IMAGE_CREATE':
             return self.create_img(query, 0)
 
-    def reply_text(self, query, user_id, retry_count=0) ->dict:
+    def reply_text(self, session, session_id, retry_count=0) ->dict:
         '''
         call openai's ChatCompletion to get the answer
-        :param query: query content
-        :param user_id: from user id
+        :param session: a conversation session
+        :param session_id: session id
         :param retry_count: retry count
         :return: {}
         '''
         try:
             response = openai.ChatCompletion.create(
                 model="gpt-3.5-turbo",  # 对话模型的名称
-                messages=query,
+                messages=session,
                 temperature=0.9,  # 值在[0,1]之间,越大表示回复越具有不确定性
                 #max_tokens=4096,  # 回复最大的字符数
                 top_p=1,
                 frequency_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
                 presence_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
             )
-            logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
+            # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
             return {"total_tokens": response["usage"]["total_tokens"],
                     "completion_tokens": response["usage"]["completion_tokens"],
                     "content": response.choices[0]['message']['content']}
@@ -76,7 +76,7 @@ class ChatGPTBot(Bot):
             if retry_count < 1:
                 time.sleep(5)
                 logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
-                return self.reply_text(query, user_id, retry_count+1)
+                return self.reply_text(session, session_id, retry_count+1)
             else:
                 return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
         except openai.error.APIConnectionError as e:
@@ -91,7 +91,7 @@ class ChatGPTBot(Bot):
         except Exception as e:
             # unknown exception
             logger.exception(e)
-            Session.clear_session(user_id)
+            Session.clear_session(session_id)
             return {"completion_tokens": 0, "content": "请再问我一次吧"}
 
     def create_img(self, query, retry_count=0):
@@ -119,7 +119,7 @@ class ChatGPTBot(Bot):
 
 class Session(object):
     @staticmethod
-    def build_session_query(query, user_id):
+    def build_session_query(query, session_id):
         '''
         build query with conversation history
         e.g.  [
@@ -129,28 +129,28 @@ class Session(object):
             {"role": "user", "content": "Where was it played?"}
         ]
         :param query: query content
-        :param user_id: from user id
+        :param session_id: session id
         :return: query content with conversaction
         '''
-        session = user_session.get(user_id, [])
+        session = all_sessions.get(session_id, [])
         if len(session) == 0:
             system_prompt = conf().get("character_desc", "")
             system_item = {'role': 'system', 'content': system_prompt}
             session.append(system_item)
-            user_session[user_id] = session
+            all_sessions[session_id] = session
         user_item = {'role': 'user', 'content': query}
         session.append(user_item)
         return session
 
     @staticmethod
-    def save_session(answer, user_id, total_tokens):
+    def save_session(answer, session_id, total_tokens):
         max_tokens = conf().get("conversation_max_tokens")
         if not max_tokens:
             # default 3000
             max_tokens = 1000
         max_tokens=int(max_tokens)
-        session = user_session.get(user_id)
+        session = all_sessions.get(session_id)
         if session:
             # append conversation
             gpt_item = {'role': 'assistant', 'content': answer}
@@ -174,9 +174,9 @@ class Session(object):
             dec_tokens = dec_tokens - max_tokens
 
     @staticmethod
-    def clear_session(user_id):
-        user_session[user_id] = []
+    def clear_session(session_id):
+        all_sessions[session_id] = []
 
     @staticmethod
     def clear_all_session():
-        user_session.clear()
+        all_sessions.clear()
diff --git a/channel/wechat/wechat_channel.py b/channel/wechat/wechat_channel.py
index 66778f4..c7cbe82 100644
--- a/channel/wechat/wechat_channel.py
+++ b/channel/wechat/wechat_channel.py
@@ -113,7 +113,7 @@ class WechatChannel(Channel):
         if not query:
             return
         context = dict()
-        context['from_user_id'] = reply_user_id
+        context['session_id'] = reply_user_id
         reply_text = super().build_reply_content(query, context)
         if reply_text:
             self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id)
@@ -147,7 +147,10 @@ class WechatChannel(Channel):
         if not query:
             return
         context = dict()
-        context['from_user_id'] = msg['ActualUserName']
+        if conf().get("group_chat_in_one_session", False):
+            context['session_id'] = msg['User']['UserName']
+        else:
+            context['session_id'] = msg['ActualUserName']
         reply_text = super().build_reply_content(query, context)
         if reply_text:
             reply_text = '@' + msg['ActualNickName'] + ' ' + reply_text.strip()
diff --git a/channel/wechat/wechaty_channel.py b/channel/wechat/wechaty_channel.py
index 8f27f6d..240d2a7 100644
--- a/channel/wechat/wechaty_channel.py
+++ b/channel/wechat/wechaty_channel.py
@@ -128,7 +128,7 @@ class WechatyChannel(Channel):
         if not query:
             return
         context = dict()
-        context['from_user_id'] = reply_user_id
+        context['session_id'] = reply_user_id
         reply_text = super().build_reply_content(query, context)
         if reply_text:
             await self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id)
@@ -163,7 +163,10 @@ class WechatyChannel(Channel):
         if not query:
             return
         context = dict()
-        context['from_user_id'] = str(group_id) + '-' + str(group_user_id)
+        if conf().get("group_chat_in_one_session", False):
+            context['session_id'] = str(group_id)
+        else:
+            context['session_id'] = str(group_id) + '-' + str(group_user_id)
         reply_text = super().build_reply_content(query, context)
         if reply_text:
             reply_text = '@' + group_user_name + ' ' + reply_text.strip()
diff --git a/config-template.json b/config-template.json
index fd6d46a..1dd5539 100644
--- a/config-template.json
+++ b/config-template.json
@@ -4,6 +4,7 @@
   "single_chat_prefix": ["bot", "@bot"],
   "single_chat_reply_prefix": "[bot] ",
   "group_chat_prefix": ["@bot"],
+  "group_chat_in_one_session": "True",
   "group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"],
   "image_create_prefix": ["画", "看", "找"],
   "conversation_max_tokens": 1000,
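Note (not part of the diff): every channel change above reduces to one decision, namely which key is written into context['session_id'] before build_reply_content is called; the bot then stores and looks up conversation history in the module-level all_sessions dict under that key. The sketch below restates that rule on its own; pick_session_id and its parameters are illustrative names, not functions that exist in the repo.

    # Illustrative summary of the session-key rule this diff implements (not repo code).
    def pick_session_id(is_group_chat, group_chat_in_one_session,
                        reply_user_id, group_id, group_user_id):
        if not is_group_chat:
            # Private chats keep one session per user, unchanged by this diff.
            return reply_user_id
        if group_chat_in_one_session:
            # New option: the whole group shares a single conversation history.
            return str(group_id)
        # Default: each group member keeps an isolated history, as before.
        return str(group_id) + '-' + str(group_user_id)

One detail worth double-checking before merge: config-template.json sets "group_chat_in_one_session" to the JSON string "True", while the code only tests truthiness via conf().get(..., False), so the string "False" would also enable the shared session; a JSON boolean would avoid that surprise.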