# access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat

import time

import requests

from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, pconf


class LinkAIBot(Bot, OpenAIImage):
    # authentication failed
    AUTH_FAILED_CODE = 401
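    # insufficient quota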
    NO_QUOTA_CODE = 406

    def __init__(self):
        super().__init__()
        self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
        self.args = {}

    def reply(self, query, context: Context = None) -> Reply:
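        """
        Dispatch an incoming message by context type: text goes to _chat, image-creation
        requests go to create_img, and any other type returns an error reply.
        """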
        if context.type == ContextType.TEXT:
            return self._chat(query, context)
        elif context.type == ContextType.IMAGE_CREATE:
            ok, res = self.create_img(query, 0)
            if ok:
                reply = Reply(ReplyType.IMAGE_URL, res)
            else:
                reply = Reply(ReplyType.ERROR, res)
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def _chat(self, query, context, retry_count=0) -> Reply:
        """
        Send a chat request.
        :param query: prompt text of the request
        :param context: conversation context
        :param retry_count: current recursive retry count
        :return: reply
        """
        if retry_count >= 2:
            # give up after 2 retries
            logger.warn("[LINKAI] failed after maximum number of retry times")
            return Reply(ReplyType.ERROR, "请再问我一次吧")

        try:
            # load config
            if context.get("generate_breaked_by"):
                logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
                app_code = None
            else:
                app_code = context.kwargs.get("app_code") or conf().get("linkai_app_code")
            linkai_api_key = conf().get("linkai_api_key")

            session_id = context["session_id"]

            session = self.sessions.session_query(query, session_id)
            model = conf().get("model") or "gpt-3.5-turbo"
            # remove the system message when an app code or the wenxin model is used
            if session.messages[0].get("role") == "system":
                if app_code or model == "wenxin":
                    session.messages.pop(0)

            body = {
                "app_code": app_code,
                "messages": session.messages,
                "model": model,  # chat model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": conf().get("temperature"),
                "top_p": conf().get("top_p", 1),
                "frequency_penalty": conf().get("frequency_penalty", 0.0),  # within [-2, 2]; higher values favor more varied content
                "presence_penalty": conf().get("presence_penalty", 0.0),  # within [-2, 2]; higher values favor more varied content
            }
            file_id = context.kwargs.get("file_id")
            if file_id:
                body["file_id"] = file_id
            logger.info(f"[LINKAI] query={query}, app_code={app_code}, model={body.get('model')}, file_id={file_id}")
            headers = {"Authorization": "Bearer " + linkai_api_key}

            # do http request
            base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=conf().get("request_timeout", 180))
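            # The endpoint returns an OpenAI-style payload: on success the reply text sits in
            # choices[0].message.content with token counts under "usage"; on failure an "error"
            # object carries "message" and "type" (see the parsing below).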
            if res.status_code == 200:
                # request succeeded
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                suffix = self._fetch_knowledge_search_suffix(response)
                if suffix:
                    reply_content += suffix
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                self.sessions.session_reply(reply_content, session_id, total_tokens)
                return Reply(ReplyType.TEXT, reply_content)

            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")

                if res.status_code >= 500:
                    # server error, retry
                    time.sleep(2)
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self._chat(query, context, retry_count + 1)

                return Reply(ReplyType.ERROR, "提问太快啦,请休息一下再问我吧")

        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[LINKAI] do retry, times={retry_count}")
            return self._chat(query, context, retry_count + 1)

    def reply_text(self, session: ChatGPTSession, app_code="", retry_count=0) -> dict:
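        """
        Send the current session messages to the LinkAI completions endpoint.
        :param session: chat session holding the message history
        :param app_code: LinkAI application code (may be empty)
        :param retry_count: current recursive retry count
        :return: dict with total_tokens, completion_tokens and content
        """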
        if retry_count >= 2:
            # give up after 2 retries
            logger.warn("[LINKAI] failed after maximum number of retry times")
            return {
                "total_tokens": 0,
                "completion_tokens": 0,
                "content": "请再问我一次吧"
            }

        try:
            body = {
                "app_code": app_code,
                "messages": session.messages,
                "model": conf().get("model") or "gpt-3.5-turbo",  # chat model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": conf().get("temperature"),
                "top_p": conf().get("top_p", 1),
                "frequency_penalty": conf().get("frequency_penalty", 0.0),  # within [-2, 2]; higher values favor more varied content
                "presence_penalty": conf().get("presence_penalty", 0.0),  # within [-2, 2]; higher values favor more varied content
            }
            if self.args.get("max_tokens"):
                body["max_tokens"] = self.args.get("max_tokens")
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}

            # do http request
            base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=conf().get("request_timeout", 180))
            if res.status_code == 200:
                # request succeeded
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
                return {
                    "total_tokens": total_tokens,
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": reply_content,
                }

            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")

                if res.status_code >= 500:
                    # server error, retry
                    time.sleep(2)
                    logger.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self.reply_text(session, app_code, retry_count + 1)

                return {
                    "total_tokens": 0,
                    "completion_tokens": 0,
                    "content": "提问太快啦,请休息一下再问我吧"
                }

        except Exception as e:
            logger.exception(e)
            # retry
            time.sleep(2)
            logger.warn(f"[LINKAI] do retry, times={retry_count}")
            return self.reply_text(session, app_code, retry_count + 1)

    def _fetch_knowledge_search_suffix(self, response) -> str:
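        """
        Build an optional suffix for the reply from the knowledge base search result: when the
        "linkai" plugin enables search_miss_text_enabled, return the configured search_miss_suffix
        if the search missed or its similarity is below search_miss_similarity.
        """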
        try:
            if response.get("knowledge_base"):
                search_hit = response.get("knowledge_base").get("search_hit")
                first_similarity = response.get("knowledge_base").get("first_similarity")
                logger.info(f"[LINKAI] knowledge base, search_hit={search_hit}, first_similarity={first_similarity}")
                plugin_config = pconf("linkai")
                if plugin_config.get("knowledge_base") and plugin_config.get("knowledge_base").get("search_miss_text_enabled"):
                    search_miss_similarity = plugin_config.get("knowledge_base").get("search_miss_similarity")
                    search_miss_text = plugin_config.get("knowledge_base").get("search_miss_suffix")
                    if not search_hit:
                        return search_miss_text
                    if search_miss_similarity and float(search_miss_similarity) > first_similarity:
                        return search_miss_text
        except Exception as e:
            logger.exception(e)
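
# Minimal usage sketch (illustrative only, not part of the module): assumes config.json provides
# "linkai_api_key" and that the channel layer builds a Context carrying a "session_id" in kwargs.
#
#   bot = LinkAIBot()
#   ctx = Context(ContextType.TEXT, "hello", kwargs={"session_id": "user-1"})
#   print(bot.reply("hello", ctx).content)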