Nelze vybrat více než 25 témat. Téma musí začínat písmenem nebo číslem, může obsahovat pomlčky („-“) a může být dlouhé až 35 znaků.

286 lines
13KB

  1. # encoding:utf-8
  2. from bot.bot import Bot
  3. from bridge.context import ContextType
  4. from bridge.reply import Reply, ReplyType
  5. from config import conf, load_config
  6. from common.log import logger
  7. from common.token_bucket import TokenBucket
  8. from common.expired_dict import ExpiredDict
  9. import openai
  10. import time
# OpenAI chat-completion model API (usable)
class ChatGPTBot(Bot):
    """Bot that answers text queries via openai.ChatCompletion and creates
    images via openai.Image, with optional token-bucket rate limiting."""

    def __init__(self):
        # Configure the global ``openai`` module from config: API key,
        # optional custom endpoint, and optional HTTP proxy.
        openai.api_key = conf().get('open_ai_api_key')
        if conf().get('open_ai_api_base'):
            openai.api_base = conf().get('open_ai_api_base')
        proxy = conf().get('proxy')
        # Per-session conversation store. NOTE(review): SessionManager is
        # defined later in this module; the name resolves at call time.
        self.sessions = SessionManager(model= conf().get("model") or "gpt-3.5-turbo")
        if proxy:
            openai.proxy = proxy
        # Token buckets are created only when the matching rate-limit keys are
        # set; every use below is guarded by the same conf() check.
        if conf().get('rate_limit_chatgpt'):
            self.tb4chatgpt = TokenBucket(conf().get('rate_limit_chatgpt', 20))
        if conf().get('rate_limit_dalle'):
            self.tb4dalle = TokenBucket(conf().get('rate_limit_dalle', 50))

    def reply(self, query, context=None):
        '''
        Route a request by context type: text chat, image creation, or error.

        :param query: user text (or the image prompt for IMAGE_CREATE)
        :param context: Context object with ``.type`` and dict-style access
            (``context['session_id']``)
        :return: a Reply of type TEXT / IMAGE_URL / INFO / ERROR
        '''
        # acquire reply content
        if context.type == ContextType.TEXT:
            logger.info("[OPEN_AI] query={}".format(query))
            session_id = context['session_id']
            reply = None
            # Admin commands are handled locally, before calling the model.
            clear_memory_commands = conf().get('clear_memory_commands', ['#清除记忆'])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, '记忆已清除')
            elif query == '#清除所有':
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, '所有人记忆已清除')
            elif query == '#更新配置':
                load_config()
                reply = Reply(ReplyType.INFO, '配置已更新')
            if reply:
                return reply
            session = self.sessions.build_session_query(query, session_id)
            logger.debug("[OPEN_AI] session query={}".format(session))
            # if context.get('stream'):
            #     # reply in stream
            #     return self.reply_text_stream(query, new_query, session_id)
            reply_content = self.reply_text(session, session_id, 0)
            logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session, session_id, reply_content["content"], reply_content["completion_tokens"]))
            # completion_tokens == 0 marks an error dict from reply_text;
            # only a real completion is saved back into the session history.
            if reply_content['completion_tokens'] == 0 and len(reply_content['content']) > 0:
                reply = Reply(ReplyType.ERROR, reply_content['content'])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.save_session(reply_content["content"], session_id, reply_content["total_tokens"])
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content['content'])
                logger.debug("[OPEN_AI] reply {} used 0 tokens.".format(reply_content))
            return reply
        elif context.type == ContextType.IMAGE_CREATE:
            ok, retstring = self.create_img(query, 0)
            reply = None
            if ok:
                reply = Reply(ReplyType.IMAGE_URL, retstring)
            else:
                reply = Reply(ReplyType.ERROR, retstring)
            return reply
        else:
            reply = Reply(ReplyType.ERROR, 'Bot不支持处理{}类型的消息'.format(context.type))
            return reply

    def compose_args(self):
        # Shared ChatCompletion keyword arguments (Azure subclass overrides
        # this to rename "model" to "engine").
        return {
            "model": conf().get("model") or "gpt-3.5-turbo",  # name of the chat model
            "temperature": conf().get('temperature', 0.9),  # in [0,1]; larger means less deterministic replies
            # "max_tokens":4096,  # maximum length of the reply
            "top_p": 1,
            "frequency_penalty": conf().get('frequency_penalty', 0.0),  # in [-2,2]; larger values favor novel content
            "presence_penalty": conf().get('presence_penalty', 0.0)  # in [-2,2]; larger values favor novel content
        }

    def reply_text(self, session, session_id, retry_count=0) -> dict:
        '''
        call openai's ChatCompletion to get the answer
        :param session: a conversation session (list of role/content messages)
        :param session_id: session id, used to clear the session on unknown errors
        :param retry_count: current retry attempt (at most one retry on rate limit)
        :return: dict with "content"; includes "completion_tokens"/"total_tokens"
            on success, and "completion_tokens" == 0 on any failure
        '''
        try:
            # Local rate limiting: fail fast instead of hitting the API.
            if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
            response = openai.ChatCompletion.create(
                messages=session, **self.compose_args()
            )
            # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
            return {"total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": response.choices[0]['message']['content']}
        except openai.error.RateLimitError as e:
            # rate limit exception: sleep 5s and retry once, then give up
            logger.warn(e)
            if retry_count < 1:
                time.sleep(5)
                logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
                return self.reply_text(session, session_id, retry_count+1)
            else:
                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
        except openai.error.APIConnectionError as e:
            # api connection exception
            logger.warn(e)
            logger.warn("[OPEN_AI] APIConnection failed")
            return {"completion_tokens": 0, "content": "我连接不到你的网络"}
        except openai.error.Timeout as e:
            logger.warn(e)
            logger.warn("[OPEN_AI] Timeout")
            return {"completion_tokens": 0, "content": "我没有收到你的消息"}
        except Exception as e:
            # unknown exception: drop the session so the next query starts clean
            logger.exception(e)
            self.sessions.clear_session(session_id)
            return {"completion_tokens": 0, "content": "请再问我一次吧"}

    def create_img(self, query, retry_count=0):
        '''
        Generate an image from a text prompt via openai.Image.

        :param query: image description prompt
        :param retry_count: current retry attempt (at most one retry on rate limit)
        :return: (True, image_url) on success, (False, error message) on failure
        '''
        try:
            if conf().get('rate_limit_dalle') and not self.tb4dalle.get_token():
                return False, "请求太快了,请休息一下再问我吧"
            logger.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                prompt=query,  # image description
                n=1,  # number of images generated per request
                size="256x256"  # image size; one of 256x256, 512x512, 1024x1024
            )
            image_url = response['data'][0]['url']
            logger.info("[OPEN_AI] image_url={}".format(image_url))
            return True, image_url
        except openai.error.RateLimitError as e:
            logger.warn(e)
            if retry_count < 1:
                time.sleep(5)
                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
                return self.create_img(query, retry_count+1)
            else:
                return False, "提问太快啦,请休息一下再问我吧"
        except Exception as e:
            logger.exception(e)
            return False, str(e)
  144. class AzureChatGPTBot(ChatGPTBot):
  145. def __init__(self):
  146. super().__init__()
  147. openai.api_type = "azure"
  148. openai.api_version = "2023-03-15-preview"
  149. def compose_args(self):
  150. args = super().compose_args()
  151. args["engine"] = args["model"]
  152. del(args["model"])
  153. return args
  154. class SessionManager(object):
  155. def __init__(self, model = "gpt-3.5-turbo-0301"):
  156. if conf().get('expires_in_seconds'):
  157. sessions = ExpiredDict(conf().get('expires_in_seconds'))
  158. else:
  159. sessions = dict()
  160. self.sessions = sessions
  161. self.model = model
  162. def build_session(self, session_id, system_prompt=None):
  163. session = self.sessions.get(session_id, [])
  164. if len(session) == 0:
  165. if system_prompt is None:
  166. system_prompt = conf().get("character_desc", "")
  167. system_item = {'role': 'system', 'content': system_prompt}
  168. session.append(system_item)
  169. self.sessions[session_id] = session
  170. return session
  171. def build_session_query(self, query, session_id):
  172. '''
  173. build query with conversation history
  174. e.g. [
  175. {"role": "system", "content": "You are a helpful assistant."},
  176. {"role": "user", "content": "Who won the world series in 2020?"},
  177. {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
  178. {"role": "user", "content": "Where was it played?"}
  179. ]
  180. :param query: query content
  181. :param session_id: session id
  182. :return: query content with conversaction
  183. '''
  184. session = self.build_session(session_id)
  185. user_item = {'role': 'user', 'content': query}
  186. session.append(user_item)
  187. try:
  188. total_tokens = num_tokens_from_messages(session, self.model)
  189. max_tokens = conf().get("conversation_max_tokens", 1000)
  190. total_tokens = self.discard_exceed_conversation(session, max_tokens, total_tokens)
  191. logger.debug("prompt tokens used={}".format(total_tokens))
  192. except Exception as e:
  193. logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e)))
  194. return session
  195. def save_session(self, answer, session_id, total_tokens):
  196. max_tokens = conf().get("conversation_max_tokens", 1000)
  197. session = self.sessions.get(session_id)
  198. if session:
  199. # append conversation
  200. gpt_item = {'role': 'assistant', 'content': answer}
  201. session.append(gpt_item)
  202. # discard exceed limit conversation
  203. tokens_cnt = self.discard_exceed_conversation(session, max_tokens, total_tokens)
  204. logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
  205. def discard_exceed_conversation(self, session, max_tokens, total_tokens):
  206. dec_tokens = int(total_tokens)
  207. # logger.info("prompt tokens used={},max_tokens={}".format(used_tokens,max_tokens))
  208. while dec_tokens > max_tokens:
  209. # pop first conversation
  210. if len(session) > 2:
  211. session.pop(1)
  212. elif len(session) == 2 and session[1]["role"] == "assistant":
  213. session.pop(1)
  214. break
  215. elif len(session) == 2 and session[1]["role"] == "user":
  216. logger.warn("user message exceed max_tokens. total_tokens={}".format(dec_tokens))
  217. break
  218. else:
  219. logger.debug("max_tokens={}, total_tokens={}, len(sessions)={}".format(max_tokens, dec_tokens, len(session)))
  220. break
  221. try:
  222. cur_tokens = num_tokens_from_messages(session, self.model)
  223. dec_tokens = cur_tokens
  224. except Exception as e:
  225. logger.debug("Exception when counting tokens precisely for query: {}".format(e))
  226. dec_tokens = dec_tokens - max_tokens
  227. return dec_tokens
  228. def clear_session(self, session_id):
  229. self.sessions[session_id] = []
  230. def clear_all_session(self):
  231. self.sessions.clear()
  232. # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
  233. def num_tokens_from_messages(messages, model):
  234. """Returns the number of tokens used by a list of messages."""
  235. import tiktoken
  236. try:
  237. encoding = tiktoken.encoding_for_model(model)
  238. except KeyError:
  239. logger.debug("Warning: model not found. Using cl100k_base encoding.")
  240. encoding = tiktoken.get_encoding("cl100k_base")
  241. if model == "gpt-3.5-turbo":
  242. return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
  243. elif model == "gpt-4":
  244. return num_tokens_from_messages(messages, model="gpt-4-0314")
  245. elif model == "gpt-3.5-turbo-0301":
  246. tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
  247. tokens_per_name = -1 # if there's a name, the role is omitted
  248. elif model == "gpt-4-0314":
  249. tokens_per_message = 3
  250. tokens_per_name = 1
  251. else:
  252. logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo-0301.")
  253. return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
  254. num_tokens = 0
  255. for message in messages:
  256. num_tokens += tokens_per_message
  257. for key, value in message.items():
  258. num_tokens += len(encoding.encode(value))
  259. if key == "name":
  260. num_tokens += tokens_per_name
  261. num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
  262. return num_tokens