--- a/README.md
+++ b/README.md
@@ -81,7 +81,9 @@ pip3 install --upgrade openai
 **(3) Extended dependencies (optional):**
 
 Dependencies for voice recognition and voice replies: [#415](https://github.com/zhayujie/chatgpt-on-wechat/issues/415).
 
 For more accurate counting of the tokens used by a session:
 ```bash
 pip3 install --upgrade tiktoken
 ```
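To make that claim concrete: with tiktoken installed, token usage can be measured locally instead of being estimated from string length. A minimal sketch (not part of this diff):

```python
# Minimal sketch: count the tokens a piece of text consumes for a given model.
import tiktoken

encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
text = "你好,请介绍一下你自己。"
print(len(encoding.encode(text)))  # token count used for session-size accounting
```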
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -1,6 +1,9 @@
 # encoding:utf-8
 
 from bot.bot import Bot
+from bot.chatgpt.chat_gpt_session import ChatGPTSession
+from bot.openai.open_ai_image import OpenAIImage
+from bot.session_manager import Session, SessionManager
 from bridge.context import ContextType
 from bridge.reply import Reply, ReplyType
 from config import conf, load_config
@@ -10,21 +13,20 @@ from common.expired_dict import ExpiredDict
 import openai
 import time
 
 
 # OpenAI chat model API (working)
-class ChatGPTBot(Bot):
+class ChatGPTBot(Bot, OpenAIImage):
     def __init__(self):
+        super().__init__()
         openai.api_key = conf().get('open_ai_api_key')
         if conf().get('open_ai_api_base'):
             openai.api_base = conf().get('open_ai_api_base')
         proxy = conf().get('proxy')
-        self.sessions = SessionManager(model=conf().get("model") or "gpt-3.5-turbo")
         if proxy:
             openai.proxy = proxy
         if conf().get('rate_limit_chatgpt'):
             self.tb4chatgpt = TokenBucket(conf().get('rate_limit_chatgpt', 20))
-        if conf().get('rate_limit_dalle'):
-            self.tb4dalle = TokenBucket(conf().get('rate_limit_dalle', 50))
+        self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
 
     def reply(self, query, context=None):
         # acquire reply content
@@ -45,19 +47,19 @@ class ChatGPTBot(Bot):
                 reply = Reply(ReplyType.INFO, '配置已更新')
             if reply:
                 return reply
-            session = self.sessions.build_session_query(query, session_id)
-            logger.debug("[OPEN_AI] session query={}".format(session))
+            session = self.sessions.session_query(query, session_id)
+            logger.debug("[OPEN_AI] session query={}".format(session.messages))
             # if context.get('stream'):
             #     # reply in stream
             #     return self.reply_text_stream(query, new_query, session_id)
             reply_content = self.reply_text(session, session_id, 0)
-            logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session, session_id, reply_content["content"], reply_content["completion_tokens"]))
+            logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session.messages, session_id, reply_content["content"], reply_content["completion_tokens"]))
             if reply_content['completion_tokens'] == 0 and len(reply_content['content']) > 0:
                 reply = Reply(ReplyType.ERROR, reply_content['content'])
             elif reply_content["completion_tokens"] > 0:
-                self.sessions.save_session(reply_content["content"], session_id, reply_content["total_tokens"])
+                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                 reply = Reply(ReplyType.TEXT, reply_content["content"])
             else:
                 reply = Reply(ReplyType.ERROR, reply_content['content'])
@@ -86,7 +88,7 @@ class ChatGPTBot(Bot):
             "presence_penalty": conf().get('presence_penalty', 0.0),  # in [-2,2]; higher values push the model toward new content
         }
 
-    def reply_text(self, session, session_id, retry_count=0) -> dict:
+    def reply_text(self, session: ChatGPTSession, session_id, retry_count=0) -> dict:
         '''
        call openai's ChatCompletion to get the answer
        :param session: a conversation session
@@ -98,7 +100,7 @@ class ChatGPTBot(Bot):
             if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
                 return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
             response = openai.ChatCompletion.create(
-                messages=session, **self.compose_args()
+                messages=session.messages, **self.compose_args()
             )
             # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
             return {"total_tokens": response["usage"]["total_tokens"],
@@ -128,31 +130,6 @@ class ChatGPTBot(Bot):
             self.sessions.clear_session(session_id)
             return {"completion_tokens": 0, "content": "请再问我一次吧"}
 
-    def create_img(self, query, retry_count=0):
-        try:
-            if conf().get('rate_limit_dalle') and not self.tb4dalle.get_token():
-                return False, "请求太快了,请休息一下再问我吧"
-            logger.info("[OPEN_AI] image_query={}".format(query))
-            response = openai.Image.create(
-                prompt=query,    # image description
-                n=1,             # number of images per request
-                size="256x256"   # image size; one of 256x256, 512x512, 1024x1024
-            )
-            image_url = response['data'][0]['url']
-            logger.info("[OPEN_AI] image_url={}".format(image_url))
-            return True, image_url
-        except openai.error.RateLimitError as e:
-            logger.warn(e)
-            if retry_count < 1:
-                time.sleep(5)
-                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
-                return self.create_img(query, retry_count+1)
-            else:
-                return False, "提问太快啦,请休息一下再问我吧"
-        except Exception as e:
-            logger.exception(e)
-            return False, str(e)
 
 class AzureChatGPTBot(ChatGPTBot):
     def __init__(self):
@@ -164,123 +141,4 @@ class AzureChatGPTBot(ChatGPTBot):
         args = super().compose_args()
         args["engine"] = args["model"]
         del(args["model"])
         return args
-
-class SessionManager(object):
-    def __init__(self, model="gpt-3.5-turbo-0301"):
-        if conf().get('expires_in_seconds'):
-            sessions = ExpiredDict(conf().get('expires_in_seconds'))
-        else:
-            sessions = dict()
-        self.sessions = sessions
-        self.model = model
-
-    def build_session(self, session_id, system_prompt=None):
-        session = self.sessions.get(session_id, [])
-        if len(session) == 0:
-            if system_prompt is None:
-                system_prompt = conf().get("character_desc", "")
-            system_item = {'role': 'system', 'content': system_prompt}
-            session.append(system_item)
-            self.sessions[session_id] = session
-        return session
-
-    def build_session_query(self, query, session_id):
-        '''
-        build query with conversation history
-        e.g.  [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "Who won the world series in 2020?"},
-            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
-            {"role": "user", "content": "Where was it played?"}
-        ]
-        :param query: query content
-        :param session_id: session id
-        :return: query content with conversation history
-        '''
-        session = self.build_session(session_id)
-        user_item = {'role': 'user', 'content': query}
-        session.append(user_item)
-        try:
-            total_tokens = num_tokens_from_messages(session, self.model)
-            max_tokens = conf().get("conversation_max_tokens", 1000)
-            total_tokens = self.discard_exceed_conversation(session, max_tokens, total_tokens)
-            logger.debug("prompt tokens used={}".format(total_tokens))
-        except Exception as e:
-            logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e)))
-        return session
-
-    def save_session(self, answer, session_id, total_tokens):
-        max_tokens = conf().get("conversation_max_tokens", 1000)
-        session = self.sessions.get(session_id)
-        if session:
-            # append conversation
-            gpt_item = {'role': 'assistant', 'content': answer}
-            session.append(gpt_item)
-            # discard exceed limit conversation
-            tokens_cnt = self.discard_exceed_conversation(session, max_tokens, total_tokens)
-            logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
-
-    def discard_exceed_conversation(self, session, max_tokens, total_tokens):
-        dec_tokens = int(total_tokens)
-        # logger.info("prompt tokens used={},max_tokens={}".format(used_tokens,max_tokens))
-        while dec_tokens > max_tokens:
-            # pop first conversation
-            if len(session) > 2:
-                session.pop(1)
-            elif len(session) == 2 and session[1]["role"] == "assistant":
-                session.pop(1)
-                break
-            elif len(session) == 2 and session[1]["role"] == "user":
-                logger.warn("user message exceed max_tokens. total_tokens={}".format(dec_tokens))
-                break
-            else:
-                logger.debug("max_tokens={}, total_tokens={}, len(sessions)={}".format(max_tokens, dec_tokens, len(session)))
-                break
-            try:
-                cur_tokens = num_tokens_from_messages(session, self.model)
-                dec_tokens = cur_tokens
-            except Exception as e:
-                logger.debug("Exception when counting tokens precisely for query: {}".format(e))
-                dec_tokens = dec_tokens - max_tokens
-        return dec_tokens
-
-    def clear_session(self, session_id):
-        self.sessions[session_id] = []
-
-    def clear_all_session(self):
-        self.sessions.clear()
-
-
-# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-def num_tokens_from_messages(messages, model):
-    """Returns the number of tokens used by a list of messages."""
-    import tiktoken
-    try:
-        encoding = tiktoken.encoding_for_model(model)
-    except KeyError:
-        logger.debug("Warning: model not found. Using cl100k_base encoding.")
-        encoding = tiktoken.get_encoding("cl100k_base")
-    if model == "gpt-3.5-turbo":
-        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
-    elif model == "gpt-4":
-        return num_tokens_from_messages(messages, model="gpt-4-0314")
-    elif model == "gpt-3.5-turbo-0301":
-        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
-        tokens_per_name = -1  # if there's a name, the role is omitted
-    elif model == "gpt-4-0314":
-        tokens_per_message = 3
-        tokens_per_name = 1
-    else:
-        logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo-0301.")
-        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
-    num_tokens = 0
-    for message in messages:
-        num_tokens += tokens_per_message
-        for key, value in message.items():
-            num_tokens += len(encoding.encode(value))
-            if key == "name":
-                num_tokens += tokens_per_name
-    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
-    return num_tokens
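The hunks above fold image generation into an `OpenAIImage` mixin and add a `super().__init__()` call, which is what actually runs the mixin's initializer under Python's method resolution order. A toy illustration of the pattern (simplified stand-ins, not the project's real classes):

```python
# Simplified stand-ins showing why super().__init__() reaches the mixin:
# the MRO of ChatGPTBot is [ChatGPTBot, Bot, OpenAIImage, object], so the
# single super() call continues past Bot into OpenAIImage.__init__.
class Bot:
    pass

class OpenAIImage:
    def __init__(self):
        self.tb4dalle = "rate limiter would be set up here"

class ChatGPTBot(Bot, OpenAIImage):
    def __init__(self):
        super().__init__()  # ends up calling OpenAIImage.__init__

print(ChatGPTBot().tb4dalle)  # -> "rate limiter would be set up here"
```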
--- /dev/null
+++ b/bot/chatgpt/chat_gpt_session.py
@@ -0,0 +1,92 @@
+from bot.session_manager import Session
+from common.log import logger
+
+'''
+    e.g.  [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Who won the world series in 2020?"},
+        {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
+        {"role": "user", "content": "Where was it played?"}
+    ]
+'''
+class ChatGPTSession(Session):
+    def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
+        super().__init__(session_id, system_prompt)
+        self.messages = []
+        self.model = model
+        self.reset()
+
+    def reset(self):
+        system_item = {'role': 'system', 'content': self.system_prompt}
+        self.messages = [system_item]
+
+    def add_query(self, query):
+        user_item = {'role': 'user', 'content': query}
+        self.messages.append(user_item)
+
+    def add_reply(self, reply):
+        assistant_item = {'role': 'assistant', 'content': reply}
+        self.messages.append(assistant_item)
+
+    def discard_exceeding(self, max_tokens, cur_tokens=None):
+        precise = True
+        try:
+            cur_tokens = num_tokens_from_messages(self.messages, self.model)
+        except Exception as e:
+            precise = False
+            if cur_tokens is None:
+                raise e
+            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
+        while cur_tokens > max_tokens:
+            if len(self.messages) > 2:
+                self.messages.pop(1)
+            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
+                self.messages.pop(1)
+                if precise:
+                    cur_tokens = num_tokens_from_messages(self.messages, self.model)
+                else:
+                    cur_tokens = cur_tokens - max_tokens
+                break
+            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
+                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
+                break
+            else:
+                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
+                break
+            if precise:
+                cur_tokens = num_tokens_from_messages(self.messages, self.model)
+            else:
+                cur_tokens = cur_tokens - max_tokens
+        return cur_tokens
+
+
+# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+def num_tokens_from_messages(messages, model):
+    """Returns the number of tokens used by a list of messages."""
+    import tiktoken
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        logger.debug("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    if model == "gpt-3.5-turbo":
+        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
+    elif model == "gpt-4":
+        return num_tokens_from_messages(messages, model="gpt-4-0314")
+    elif model == "gpt-3.5-turbo-0301":
+        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_name = -1  # if there's a name, the role is omitted
+    elif model == "gpt-4-0314":
+        tokens_per_message = 3
+        tokens_per_name = 1
+    else:
+        logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo-0301.")
+        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            num_tokens += len(encoding.encode(value))
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
+    return num_tokens
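The new session class is self-contained enough to exercise directly. A hypothetical usage sketch, assuming the project package is importable and tiktoken is installed:

```python
# Hypothetical usage of ChatGPTSession: turns accumulate as role-tagged
# messages, and discard_exceeding() drops the oldest turns (keeping the
# system prompt at index 0) once the token budget is exceeded.
from bot.chatgpt.chat_gpt_session import ChatGPTSession

session = ChatGPTSession("user-1", system_prompt="You are a helpful assistant.")
session.add_query("Who won the world series in 2020?")
session.add_reply("The Los Angeles Dodgers won the World Series in 2020.")
session.add_query("Where was it played?")

used = session.discard_exceeding(max_tokens=1000)  # trims history if needed
print(used, len(session.messages))
```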
--- a/bot/openai/open_ai_bot.py
+++ b/bot/openai/open_ai_bot.py
@@ -1,6 +1,9 @@
 # encoding:utf-8
 
 from bot.bot import Bot
+from bot.openai.open_ai_image import OpenAIImage
+from bot.openai.open_ai_session import OpenAISession
+from bot.session_manager import SessionManager
 from bridge.context import ContextType
 from bridge.reply import Reply, ReplyType
 from config import conf
@@ -11,8 +14,9 @@ import time
 user_session = dict()
 
 # OpenAI chat model API (working)
-class OpenAIBot(Bot):
+class OpenAIBot(Bot, OpenAIImage):
     def __init__(self):
+        super().__init__()
         openai.api_key = conf().get('open_ai_api_key')
         if conf().get('open_ai_api_base'):
             openai.api_base = conf().get('open_ai_api_base')
@@ -20,32 +24,43 @@ class OpenAIBot(Bot):
         if proxy:
             openai.proxy = proxy
+        self.sessions = SessionManager(OpenAISession, model=conf().get("model") or "text-davinci-003")
 
     def reply(self, query, context=None):
         # acquire reply content
         if context and context.type:
             if context.type == ContextType.TEXT:
                 logger.info("[OPEN_AI] query={}".format(query))
-                from_user_id = context['session_id']
+                session_id = context['session_id']
                 reply = None
                 if query == '#清除记忆':
-                    Session.clear_session(from_user_id)
+                    self.sessions.clear_session(session_id)
                     reply = Reply(ReplyType.INFO, '记忆已清除')
                 elif query == '#清除所有':
-                    Session.clear_all_session()
+                    self.sessions.clear_all_session()
                     reply = Reply(ReplyType.INFO, '所有人记忆已清除')
                 else:
-                    new_query = Session.build_session_query(query, from_user_id)
+                    session = self.sessions.session_query(query, session_id)
+                    new_query = str(session)
                     logger.debug("[OPEN_AI] session query={}".format(new_query))
-                    reply_content = self.reply_text(new_query, from_user_id, 0)
-                    logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
-                    if reply_content and query:
-                        Session.save_session(query, reply_content, from_user_id)
-                    reply = Reply(ReplyType.TEXT, reply_content)
+                    total_tokens, completion_tokens, reply_content = self.reply_text(new_query, session_id, 0)
+                    logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(new_query, session_id, reply_content, completion_tokens))
+                    if total_tokens == 0:
+                        reply = Reply(ReplyType.ERROR, reply_content)
+                    else:
+                        self.sessions.session_reply(reply_content, session_id, total_tokens)
+                        reply = Reply(ReplyType.TEXT, reply_content)
                 return reply
             elif context.type == ContextType.IMAGE_CREATE:
-                return self.create_img(query, 0)
+                ok, retstring = self.create_img(query, 0)
+                reply = None
+                if ok:
+                    reply = Reply(ReplyType.IMAGE_URL, retstring)
+                else:
+                    reply = Reply(ReplyType.ERROR, retstring)
+                return reply
 
     def reply_text(self, query, user_id, retry_count=0):
         try:
@@ -60,8 +75,10 @@ class OpenAIBot(Bot):
                 stop=["\n\n\n"]
             )
             res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
+            total_tokens = response["usage"]["total_tokens"]
+            completion_tokens = response["usage"]["completion_tokens"]
             logger.info("[OPEN_AI] reply={}".format(res_content))
-            return res_content
+            return total_tokens, completion_tokens, res_content
         except openai.error.RateLimitError as e:
             # rate limit exception
             logger.warn(e)
@@ -70,106 +87,9 @@ class OpenAIBot(Bot):
                 logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
                 return self.reply_text(query, user_id, retry_count+1)
             else:
-                return "提问太快啦,请休息一下再问我吧"
+                return 0, 0, "提问太快啦,请休息一下再问我吧"
         except Exception as e:
             # unknown exception
             logger.exception(e)
-            Session.clear_session(user_id)
-            return "请再问我一次吧"
-
-    def create_img(self, query, retry_count=0):
-        try:
-            logger.info("[OPEN_AI] image_query={}".format(query))
-            response = openai.Image.create(
-                prompt=query,    # image description
-                n=1,             # number of images per request
-                size="256x256"   # image size; one of 256x256, 512x512, 1024x1024
-            )
-            image_url = response['data'][0]['url']
-            logger.info("[OPEN_AI] image_url={}".format(image_url))
-            return image_url
-        except openai.error.RateLimitError as e:
-            logger.warn(e)
-            if retry_count < 1:
-                time.sleep(5)
-                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
-                return self.reply_text(query, retry_count+1)
-            else:
-                return "提问太快啦,请休息一下再问我吧"
-        except Exception as e:
-            logger.exception(e)
-            return None
-
-
-class Session(object):
-    @staticmethod
-    def build_session_query(query, user_id):
-        '''
-        build query with conversation history
-        e.g.  Q: xxx
-              A: xxx
-              Q: xxx
-        :param query: query content
-        :param user_id: from user id
-        :return: query content with conversation history
-        '''
-        prompt = conf().get("character_desc", "")
-        if prompt:
-            prompt += "<|endoftext|>\n\n\n"
-        session = user_session.get(user_id, None)
-        if session:
-            for conversation in session:
-                prompt += "Q: " + conversation["question"] + "\n\n\nA: " + conversation["answer"] + "<|endoftext|>\n"
-            prompt += "Q: " + query + "\nA: "
-            return prompt
-        else:
-            return prompt + "Q: " + query + "\nA: "
-
-    @staticmethod
-    def save_session(query, answer, user_id):
-        max_tokens = conf().get("conversation_max_tokens")
-        if not max_tokens:
-            # default 3000
-            max_tokens = 1000
-        conversation = dict()
-        conversation["question"] = query
-        conversation["answer"] = answer
-        session = user_session.get(user_id)
-        logger.debug(conversation)
-        logger.debug(session)
-        if session:
-            # append conversation
-            session.append(conversation)
-        else:
-            # create session
-            queue = list()
-            queue.append(conversation)
-            user_session[user_id] = queue
-        # discard exceed limit conversation
-        Session.discard_exceed_conversation(user_session[user_id], max_tokens)
-
-    @staticmethod
-    def discard_exceed_conversation(session, max_tokens):
-        count = 0
-        count_list = list()
-        for i in range(len(session)-1, -1, -1):
-            # count tokens of conversation list
-            history_conv = session[i]
-            count += len(history_conv["question"]) + len(history_conv["answer"])
-            count_list.append(count)
-        for c in count_list:
-            if c > max_tokens:
-                # pop first conversation
-                session.pop(0)
-
-    @staticmethod
-    def clear_session(user_id):
-        user_session[user_id] = []
-
-    @staticmethod
-    def clear_all_session():
-        user_session.clear()
+            self.sessions.clear_session(user_id)
+            return 0, 0, "请再问我一次吧"
--- /dev/null
+++ b/bot/openai/open_ai_image.py
@@ -0,0 +1,37 @@
+import time
+
+import openai
+
+from common.token_bucket import TokenBucket
+from common.log import logger
+from config import conf
+
+
+# image-generation interface provided by OpenAI (DALL-E)
+class OpenAIImage(object):
+    def __init__(self):
+        openai.api_key = conf().get('open_ai_api_key')
+        if conf().get('rate_limit_dalle'):
+            self.tb4dalle = TokenBucket(conf().get('rate_limit_dalle', 50))
+
+    def create_img(self, query, retry_count=0):
+        try:
+            if conf().get('rate_limit_dalle') and not self.tb4dalle.get_token():
+                return False, "请求太快了,请休息一下再问我吧"
+            logger.info("[OPEN_AI] image_query={}".format(query))
+            response = openai.Image.create(
+                prompt=query,    # image description
+                n=1,             # number of images per request
+                size="256x256"   # image size; one of 256x256, 512x512, 1024x1024
+            )
+            image_url = response['data'][0]['url']
+            logger.info("[OPEN_AI] image_url={}".format(image_url))
+            return True, image_url
+        except openai.error.RateLimitError as e:
+            logger.warn(e)
+            if retry_count < 1:
+                time.sleep(5)
+                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
+                return self.create_img(query, retry_count+1)
+            else:
+                return False, "提问太快啦,请休息一下再问我吧"
+        except Exception as e:
+            logger.exception(e)
+            return False, str(e)
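Since `create_img` now returns an `(ok, payload)` pair instead of a bare URL, callers can branch uniformly on success. A hypothetical usage sketch, assuming a loaded config with a valid `open_ai_api_key`:

```python
# Hypothetical usage of the extracted image mixin: payload is an image URL
# on success, or a user-facing error string on failure.
from bot.openai.open_ai_image import OpenAIImage

img_bot = OpenAIImage()
ok, payload = img_bot.create_img("a watercolor painting of a cat")
if ok:
    print("image url:", payload)
else:
    print("failed:", payload)
```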
--- /dev/null
+++ b/bot/openai/open_ai_session.py
@@ -0,0 +1,77 @@
+from bot.session_manager import Session
+from common.log import logger
+
+
+class OpenAISession(Session):
+    def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
+        super().__init__(session_id, system_prompt)
+        self.conversation = []
+        self.model = model
+        self.reset()
+
+    def reset(self):
+        pass
+
+    def add_query(self, query):
+        question = {'type': 'question', 'content': query}
+        self.conversation.append(question)
+
+    def add_reply(self, reply):
+        answer = {'type': 'answer', 'content': reply}
+        self.conversation.append(answer)
+
+    def __str__(self):
+        '''
+        e.g.  Q: xxx
+              A: xxx
+              Q: xxx
+        '''
+        prompt = self.system_prompt
+        if prompt:
+            prompt += "<|endoftext|>\n\n\n"
+        for item in self.conversation:
+            if item['type'] == 'question':
+                prompt += "Q: " + item['content'] + "\n"
+            elif item['type'] == 'answer':
+                prompt += "\n\nA: " + item['content'] + "<|endoftext|>\n"
+        if len(self.conversation) > 0 and self.conversation[-1]['type'] == 'question':
+            prompt += "A: "
+        return prompt
+
+    def discard_exceeding(self, max_tokens, cur_tokens=None):
+        precise = True
+        try:
+            cur_tokens = num_tokens_from_string(str(self), self.model)
+        except Exception as e:
+            precise = False
+            if cur_tokens is None:
+                raise e
+            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
+        while cur_tokens > max_tokens:
+            if len(self.conversation) > 1:
+                self.conversation.pop(0)
+            elif len(self.conversation) == 1 and self.conversation[0]["type"] == "answer":
+                self.conversation.pop(0)
+                if precise:
+                    cur_tokens = num_tokens_from_string(str(self), self.model)
+                else:
+                    cur_tokens = len(str(self))
+                break
+            elif len(self.conversation) == 1 and self.conversation[0]["type"] == "question":
+                logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
+                break
+            else:
+                logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.conversation)))
+                break
+            if precise:
+                cur_tokens = num_tokens_from_string(str(self), self.model)
+            else:
+                cur_tokens = len(str(self))
+        return cur_tokens
+
+
+# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+def num_tokens_from_string(string: str, model: str) -> int:
+    """Returns the number of tokens in a text string."""
+    import tiktoken
+    encoding = tiktoken.encoding_for_model(model)
+    num_tokens = len(encoding.encode(string, disallowed_special=()))
+    return num_tokens
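For reference, this is the completion-style prompt that `__str__` assembles. The sketch below is hypothetical usage, assuming the package is importable:

```python
# Hypothetical round-trip: OpenAISession serializes history into a Q:/A:
# prompt for the completion endpoint, ending with "A: " so the model
# continues the answer.
from bot.openai.open_ai_session import OpenAISession

session = OpenAISession("user-1", system_prompt="You are a helpful assistant.")
session.add_query("Who won the world series in 2020?")
session.add_reply("The Los Angeles Dodgers won the World Series in 2020.")
session.add_query("Where was it played?")
print(str(session))
# You are a helpful assistant.<|endoftext|>
#
#
# Q: Who won the world series in 2020?
#
#
# A: The Los Angeles Dodgers won the World Series in 2020.<|endoftext|>
# Q: Where was it played?
# A:
```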
--- /dev/null
+++ b/bot/session_manager.py
@@ -0,0 +1,81 @@
+from common.expired_dict import ExpiredDict
+from common.log import logger
+from config import conf
+
+
+class Session(object):
+    def __init__(self, session_id, system_prompt=None):
+        self.session_id = session_id
+        if system_prompt is None:
+            self.system_prompt = conf().get("character_desc", "")
+        else:
+            self.system_prompt = system_prompt
+
+    # reset the session
+    def reset(self):
+        raise NotImplementedError
+
+    def set_system_prompt(self, system_prompt):
+        self.system_prompt = system_prompt
+        self.reset()
+
+    def add_query(self, query):
+        raise NotImplementedError
+
+    def add_reply(self, reply):
+        raise NotImplementedError
+
+    def discard_exceeding(self, max_tokens=None, cur_tokens=None):
+        raise NotImplementedError
+
+
+class SessionManager(object):
+    def __init__(self, sessioncls, **session_args):
+        if conf().get('expires_in_seconds'):
+            sessions = ExpiredDict(conf().get('expires_in_seconds'))
+        else:
+            sessions = dict()
+        self.sessions = sessions
+        self.sessioncls = sessioncls
+        self.session_args = session_args
+
+    def build_session(self, session_id, system_prompt=None):
+        '''
+        If session_id is not yet in sessions, create a new session and store it.
+        If system_prompt is not None, update the session's system_prompt and reset the session.
+        '''
+        if session_id not in self.sessions:
+            self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
+        elif system_prompt is not None:  # a new system_prompt arrived: update it and reset the session
+            self.sessions[session_id].set_system_prompt(system_prompt)
+        session = self.sessions[session_id]
+        return session
+
+    def session_query(self, query, session_id):
+        session = self.build_session(session_id)
+        session.add_query(query)
+        try:
+            max_tokens = conf().get("conversation_max_tokens", 1000)
+            total_tokens = session.discard_exceeding(max_tokens, None)
+            logger.debug("prompt tokens used={}".format(total_tokens))
+        except Exception as e:
+            logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e)))
+        return session
+
+    def session_reply(self, reply, session_id, total_tokens=None):
+        session = self.build_session(session_id)
+        session.add_reply(reply)
+        try:
+            max_tokens = conf().get("conversation_max_tokens", 1000)
+            tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
+            logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
+        except Exception as e:
+            logger.debug("Exception when counting tokens precisely for session: {}".format(str(e)))
+        return session
+
+    def clear_session(self, session_id):
+        if session_id in self.sessions:
+            del(self.sessions[session_id])
+
+    def clear_all_session(self):
+        self.sessions.clear()
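The manager is deliberately generic: it holds the concrete Session subclass plus any extra constructor arguments, and builds sessions lazily per session_id. A hypothetical usage sketch, assuming `load_config()` has run so `conf()` is populated:

```python
# Hypothetical usage of the generic SessionManager with the ChatGPT session
# class; session_query/session_reply append the two halves of a turn and
# trim history against conversation_max_tokens.
from bot.session_manager import SessionManager
from bot.chatgpt.chat_gpt_session import ChatGPTSession

manager = SessionManager(ChatGPTSession, model="gpt-3.5-turbo")
session = manager.session_query("Hello!", "user-1")     # creates session, adds user turn
manager.session_reply("Hi! How can I help?", "user-1")  # adds assistant turn
print(session.messages)
```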
--- a/channel/wechat/wechat_channel.py
+++ b/channel/wechat/wechat_channel.py
@@ -50,7 +50,8 @@ def handler_group_voice(msg):
 class WechatChannel(Channel):
     def __init__(self):
-        pass
+        self.userName = None
+        self.nickName = None
 
     def startup(self):
@@ -67,6 +68,9 @@ class WechatChannel(Channel):
                 itchat.auto_login(enableCmdQR=2, hotReload=hotReload)
             else:
                 raise e
+        self.userName = itchat.instance.storageClass.userName
+        self.nickName = itchat.instance.storageClass.nickName
+        logger.info("Wechat login success, username: {}, nickname: {}".format(self.userName, self.nickName))
         # start message listener
         itchat.run()
@@ -84,8 +88,16 @@ class WechatChannel(Channel):
         if conf().get('speech_recognition') != True:
             return
         logger.debug("[WX]receive voice msg: " + msg['FileName'])
+        to_user_id = msg['ToUserName']
         from_user_id = msg['FromUserName']
-        other_user_id = msg['User']['UserName']
+        try:
+            other_user_id = msg['User']['UserName']  # the peer's id
+        except Exception as e:
+            logger.warn("[WX]get other_user_id failed: " + str(e))
+            if from_user_id == self.userName:
+                other_user_id = to_user_id
+            else:
+                other_user_id = from_user_id
         if from_user_id == other_user_id:
             context = Context(ContextType.VOICE, msg['FileName'])
             context.kwargs = {'isgroup': False, 'msg': msg, 'receiver': other_user_id, 'session_id': other_user_id}
@@ -97,7 +109,14 @@ class WechatChannel(Channel):
         content = msg['Text']
         from_user_id = msg['FromUserName']
         to_user_id = msg['ToUserName']  # recipient id
-        other_user_id = msg['User']['UserName']  # the peer's id
+        try:
+            other_user_id = msg['User']['UserName']  # the peer's id
+        except Exception as e:
+            logger.warn("[WX]get other_user_id failed: " + str(e))
+            if from_user_id == self.userName:
+                other_user_id = to_user_id
+            else:
+                other_user_id = from_user_id
         create_time = msg['CreateTime']  # message timestamp
         match_prefix = check_prefix(content, conf().get('single_chat_prefix'))
         if conf().get('hot_reload') == True and int(create_time) < int(time.time()) - 60:  # skip history messages from more than a minute ago
--- a/plugins/dungeon/dungeon.py
+++ b/plugins/dungeon/dungeon.py
@@ -52,7 +52,7 @@ class Dungeon(Plugin):
         if e_context['context'].type != ContextType.TEXT:
             return
         bottype = Bridge().get_bot_type("chat")
-        if bottype != const.CHATGPT:
+        if bottype not in (const.CHATGPT, const.OPEN_AI):
             return
         bot = Bridge().get_bot("chat")
         content = e_context['context'].content[:]
--- a/plugins/godcmd/godcmd.py
+++ b/plugins/godcmd/godcmd.py
@@ -179,7 +179,7 @@ class Godcmd(Plugin):
             elif cmd == "id":
                 ok, result = True, f"用户id=\n{user}"
             elif cmd == "reset":
-                if bottype == const.CHATGPT:
+                if bottype in (const.CHATGPT, const.OPEN_AI):
                     bot.sessions.clear_session(session_id)
                     ok, result = True, "会话已重置"
                 else:
@@ -201,7 +201,7 @@ class Godcmd(Plugin):
                     load_config()
                     ok, result = True, "配置已重载"
                 elif cmd == "resetall":
-                    if bottype == const.CHATGPT:
+                    if bottype in (const.CHATGPT, const.OPEN_AI):
                         bot.sessions.clear_all_session()
                         ok, result = True, "重置所有会话成功"
                     else:
--- a/plugins/role/role.py
+++ b/plugins/role/role.py
@@ -17,15 +17,15 @@ class RolePlay():
         self.sessionid = sessionid
         self.wrapper = wrapper or "%s"  # template wrapped around the user's input
         self.desc = desc
+        self.bot.sessions.build_session(self.sessionid, system_prompt=self.desc)
 
     def reset(self):
         self.bot.sessions.clear_session(self.sessionid)
 
     def action(self, user_action):
-        session = self.bot.sessions.build_session(self.sessionid, self.desc)
-        if session[0]['role'] == 'system' and session[0]['content'] != self.desc:  # session-expiry events are not wired up yet, so do a simple check here and reset
-            self.reset()
-            self.bot.sessions.build_session(self.sessionid, self.desc)
+        session = self.bot.sessions.build_session(self.sessionid)
+        if session.system_prompt != self.desc:  # session-expiry events are not wired up yet, so do a simple check here and reset
+            session.set_system_prompt(self.desc)
         prompt = self.wrapper % user_action
         return prompt
@@ -74,7 +74,7 @@ class Role(Plugin):
         if e_context['context'].type != ContextType.TEXT:
             return
         bottype = Bridge().get_bot_type("chat")
-        if bottype != const.CHATGPT:
+        if bottype not in (const.CHATGPT, const.OPEN_AI):
             return
         bot = Bridge().get_bot("chat")
         content = e_context['context'].content[:]