# access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
import time
import requests
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, pconf
import threading
from common import memory, utils
import base64


class LinkAIBot(Bot):
    # authentication failed
    AUTH_FAILED_CODE = 401
    NO_QUOTA_CODE = 406

    def __init__(self):
        super().__init__()
        self.sessions = LinkAISessionManager(LinkAISession, model=conf().get("model") or "gpt-3.5-turbo")
        self.args = {}

    def reply(self, query, context: Context = None) -> Reply:
        if context.type == ContextType.TEXT:
            return self._chat(query, context)
        elif context.type == ContextType.IMAGE_CREATE:
            ok, res = self.create_img(query, 0)
            if ok:
                reply = Reply(ReplyType.IMAGE_URL, res)
            else:
                reply = Reply(ReplyType.ERROR, res)
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

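    # Note: _chat() below retries on 5xx responses and on exceptions, sleeping
    # 2 seconds between attempts, and gives up with a canned fallback reply once
    # retry_count exceeds 2.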
    def _chat(self, query, context, retry_count=0) -> Reply:
        """
        Send a chat request.
        :param query: prompt text for the request
        :param context: conversation context
        :param retry_count: current recursive retry count
        :return: reply
        """
        if retry_count > 2:
            # give up after the maximum number of retries
            logger.warn("[LINKAI] failed after maximum number of retry times")
            return Reply(ReplyType.TEXT, "请再问我一次吧")

        try:
            # load config
            if context.get("generate_breaked_by"):
                logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
                app_code = None
            else:
                app_code = context.kwargs.get("app_code") or conf().get("linkai_app_code")
            linkai_api_key = conf().get("linkai_api_key")

            session_id = context["session_id"]
            session_message = self.sessions.session_msg_query(query, session_id)
            logger.debug(f"[LinkAI] session={session_message}, session_id={session_id}")

            # image process
            img_cache = memory.USER_IMAGE_CACHE.get(session_id)
            if img_cache:
                messages = self._process_image_msg(app_code=app_code, session_id=session_id, query=query, img_cache=img_cache)
                if messages:
                    session_message = messages

            model = conf().get("model")
            # remove system message
            if session_message[0].get("role") == "system":
                if app_code or model == "wenxin":
                    session_message.pop(0)

            body = {
                "app_code": app_code,
                "messages": session_message,
                "model": model,  # model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": conf().get("temperature"),
                "top_p": conf().get("top_p", 1),
                "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2, 2]; larger values favor more varied content
                "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2, 2]; larger values favor more varied content
            }
            file_id = context.kwargs.get("file_id")
            if file_id:
                body["file_id"] = file_id
            logger.info(f"[LINKAI] query={query}, app_code={app_code}, model={body.get('model')}, file_id={file_id}")
            headers = {"Authorization": "Bearer " + linkai_api_key}

            # do http request
            base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=conf().get("request_timeout", 180))
            if res.status_code == 200:
                # execute success
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                self.sessions.session_reply(reply_content, session_id, total_tokens, query=query)

                agent_suffix = self._fetch_agent_suffix(response)
                if agent_suffix:
                    reply_content += agent_suffix
                if not agent_suffix:
                    knowledge_suffix = self._fetch_knowledge_search_suffix(response)
                    if knowledge_suffix:
                        reply_content += knowledge_suffix
                # image process
                if response["choices"][0].get("img_urls"):
                    thread = threading.Thread(target=self._send_image, args=(context.get("channel"), context, response["choices"][0].get("img_urls")))
                    thread.start()
                return Reply(ReplyType.TEXT, reply_content)
            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")
                if res.status_code >= 500:
                    # server error, need retry
                    time.sleep(2)
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self._chat(query, context, retry_count + 1)
                return Reply(ReplyType.TEXT, "提问太快啦,请休息一下再问我吧")
        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[LINKAI] do retry, times={retry_count}")
            return self._chat(query, context, retry_count + 1)

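    # Image input flow: when the user has recently sent an image, _chat() finds it
    # in memory.USER_IMAGE_CACHE and, if the configured LinkAI app has a plugin
    # accepting IMAGE input, rebuilds the request as a vision-style message via
    # _build_vision_msg(); the cache entry is cleared once consumed.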
    def _process_image_msg(self, app_code: str, session_id: str, query: str, img_cache: dict):
        try:
            enable_image_input = False
            app_info = self._fetch_app_info(app_code)
            if not app_info:
                logger.debug(f"[LinkAI] not found app, can't process images, app_code={app_code}")
                return None
            plugins = app_info.get("data").get("plugins")
            for plugin in plugins:
                if plugin.get("input_type") and "IMAGE" in plugin.get("input_type"):
                    enable_image_input = True
            if not enable_image_input:
                return
            msg = img_cache.get("msg")
            path = img_cache.get("path")
            msg.prepare()
            logger.info(f"[LinkAI] query with images, path={path}")
            messages = self._build_vision_msg(query, path)
            memory.USER_IMAGE_CACHE[session_id] = None
            return messages
        except Exception as e:
            logger.exception(e)

    def _build_vision_msg(self, query: str, path: str):
        try:
            suffix = utils.get_path_suffix(path)
            with open(path, "rb") as file:
                base64_str = base64.b64encode(file.read()).decode('utf-8')
            messages = [{
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": query
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/{suffix};base64,{base64_str}"
                        }
                    }
                ]
            }]
            return messages
        except Exception as e:
            logger.exception(e)

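    # reply_text() is an alternative entry point that operates on a ChatGPTSession
    # directly and returns a plain dict (total_tokens, completion_tokens, content)
    # instead of a Reply object; it builds the same request body and uses a similar
    # retry policy as _chat().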
    def reply_text(self, session: ChatGPTSession, app_code="", retry_count=0) -> dict:
        if retry_count >= 2:
            # give up after the maximum number of retries
            logger.warn("[LINKAI] failed after maximum number of retry times")
            return {
                "total_tokens": 0,
                "completion_tokens": 0,
                "content": "请再问我一次吧"
            }
        try:
            body = {
                "app_code": app_code,
                "messages": session.messages,
                "model": conf().get("model") or "gpt-3.5-turbo",  # model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": conf().get("temperature"),
                "top_p": conf().get("top_p", 1),
                "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2, 2]; larger values favor more varied content
                "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2, 2]; larger values favor more varied content
            }
            if self.args.get("max_tokens"):
                body["max_tokens"] = self.args.get("max_tokens")
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}

            # do http request
            base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=conf().get("request_timeout", 180))
            if res.status_code == 200:
                # execute success
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                return {
                    "total_tokens": total_tokens,
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": reply_content,
                }
            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")
                if res.status_code >= 500:
                    # server error, need retry
                    time.sleep(2)
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self.reply_text(session, app_code, retry_count + 1)
                return {
                    "total_tokens": 0,
                    "completion_tokens": 0,
                    "content": "提问太快啦,请休息一下再问我吧"
                }
        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[LINKAI] do retry, times={retry_count}")
            return self.reply_text(session, app_code, retry_count + 1)

    def _fetch_app_info(self, app_code: str):
        headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
        # do http request
        base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
        params = {"app_code": app_code}
        res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
        if res.status_code == 200:
            return res.json()
        else:
            logger.warning(f"[LinkAI] find app info exception, res={res}")

    def create_img(self, query, retry_count=0, api_key=None):
        try:
            logger.info("[LinkImage] image_query={}".format(query))
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {conf().get('linkai_api_key')}"
            }
            data = {
                "prompt": query,
                "n": 1,
                "model": conf().get("text_to_image") or "dall-e-2",
                "response_format": "url",
                "img_proxy": conf().get("image_proxy")
            }
            url = conf().get("linkai_api_base", "https://api.link-ai.chat") + "/v1/images/generations"
            res = requests.post(url, headers=headers, json=data, timeout=(5, 90))
            image_url = res.json()["data"][0]["url"]
            logger.info("[LinkImage] image_url={}".format(image_url))
            return True, image_url
        except Exception as e:
            logger.error(format(e))
            return False, "画图出现问题,请休息一下再问我吧"

    def _fetch_knowledge_search_suffix(self, response) -> str:
        try:
            if response.get("knowledge_base"):
                search_hit = response.get("knowledge_base").get("search_hit")
                first_similarity = response.get("knowledge_base").get("first_similarity")
                logger.info(f"[LINKAI] knowledge base, search_hit={search_hit}, first_similarity={first_similarity}")
                plugin_config = pconf("linkai")
                if plugin_config and plugin_config.get("knowledge_base") and plugin_config.get("knowledge_base").get("search_miss_text_enabled"):
                    search_miss_similarity = plugin_config.get("knowledge_base").get("search_miss_similarity")
                    search_miss_text = plugin_config.get("knowledge_base").get("search_miss_suffix")
                    if not search_hit:
                        return search_miss_text
                    if search_miss_similarity and float(search_miss_similarity) > first_similarity:
                        return search_miss_text
        except Exception as e:
            logger.exception(e)

    def _fetch_agent_suffix(self, response):
        try:
            plugin_list = []
            logger.debug(f"[LinkAgent] res={response}")
            if response.get("agent") and response.get("agent").get("chain") and response.get("agent").get("need_show_plugin"):
                chain = response.get("agent").get("chain")
                suffix = "\n\n- - - - - - - - - - - -"
                i = 0
                for turn in chain:
                    plugin_name = turn.get('plugin_name')
                    suffix += "\n"
                    need_show_thought = response.get("agent").get("need_show_thought")
                    if turn.get("thought") and plugin_name and need_show_thought:
                        suffix += f"{turn.get('thought')}\n"
                    if plugin_name:
                        plugin_list.append(turn.get('plugin_name'))
                        suffix += f"{turn.get('plugin_icon')} {turn.get('plugin_name')}"
                        if turn.get('plugin_input'):
                            suffix += f":{turn.get('plugin_input')}"
                    if i < len(chain) - 1:
                        suffix += "\n"
                    i += 1
                logger.info(f"[LinkAgent] use plugins: {plugin_list}")
                return suffix
        except Exception as e:
            logger.exception(e)

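    # Images returned by the chat API (choices[0].img_urls) are delivered
    # asynchronously: _chat() starts _send_image() on a background thread, which
    # pushes each URL through the channel as an IMAGE_URL reply.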
    def _send_image(self, channel, context, image_urls):
        if not image_urls:
            return
        try:
            for url in image_urls:
                reply = Reply(ReplyType.IMAGE_URL, url)
                channel.send(reply, context)
        except Exception as e:
            logger.error(e)

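
# Session helpers: LinkAISessionManager.session_msg_query builds the outgoing
# message list with the pending user query appended (query and reply are only
# persisted later in session_reply), and LinkAISession falls back to a rough
# length-based estimate whenever precise token counting fails.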
class LinkAISessionManager(SessionManager):
    def session_msg_query(self, query, session_id):
        session = self.build_session(session_id)
        messages = session.messages + [{"role": "user", "content": query}]
        return messages

    def session_reply(self, reply, session_id, total_tokens=None, query=None):
        session = self.build_session(session_id)
        if query:
            session.add_query(query)
        session.add_reply(reply)
        try:
            max_tokens = conf().get("conversation_max_tokens", 2500)
            tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
            logger.info(f"[LinkAI] chat history discard, before tokens={total_tokens}, now tokens={tokens_cnt}")
        except Exception as e:
            logger.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
        return session


class LinkAISession(ChatGPTSession):
    def calc_tokens(self):
        try:
            cur_tokens = super().calc_tokens()
        except Exception as e:
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
            cur_tokens = len(str(self.messages))
        return cur_tokens

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        cur_tokens = self.calc_tokens()
        if cur_tokens > max_tokens:
            for i in range(0, len(self.messages)):
                if i > 0 and self.messages[i].get("role") == "assistant" and self.messages[i - 1].get("role") == "user":
                    self.messages.pop(i)
                    self.messages.pop(i - 1)
                    return self.calc_tokens()
        return cur_tokens
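
# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of how this bot might be driven, assuming the surrounding
# chatgpt-on-wechat runtime has loaded a config.json providing "linkai_api_key"
# (and optionally "linkai_app_code"); the exact Context constructor arguments
# are an assumption based on how context is accessed above.
#
#   bot = LinkAIBot()
#   ctx = Context(ContextType.TEXT, "你好")
#   ctx["session_id"] = "demo-session"
#   reply = bot.reply("你好", context=ctx)
#   print(reply.type, reply.content)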