@@ -47,10 +47,10 @@ class LinkAIBot(Bot):
         :param retry_count: 当前递归重试次数
         :return: 回复
         """
-        if retry_count >= 2:
+        if retry_count > 2:
             # exit from retry 2 times
             logger.warn("[LINKAI] failed after maximum number of retry times")
-            return Reply(ReplyType.ERROR, "请再问我一次吧")
+            return Reply(ReplyType.TEXT, "请再问我一次吧")
 
         try:
             # load config
@@ -118,7 +118,7 @@ class LinkAIBot(Bot):
                     logger.warn(f"[LINKAI] do retry, times={retry_count}")
                     return self._chat(query, context, retry_count + 1)
 
-                return Reply(ReplyType.ERROR, "提问太快啦,请休息一下再问我吧")
+                return Reply(ReplyType.TEXT, "提问太快啦,请休息一下再问我吧")
 
         except Exception as e:
             logger.exception(e)
@@ -18,7 +18,7 @@ class Bridge(object):
             "text_to_voice": conf().get("text_to_voice", "google"),
             "translate": conf().get("translate", "baidu"),
         }
-        model_type = conf().get("model")
+        model_type = conf().get("model") or const.GPT35
        if model_type in ["text-davinci-003"]:
             self.btype["chat"] = const.OPEN_AI
         if conf().get("use_azure_chatgpt", False):
@@ -8,6 +8,7 @@ LINKAI = "linkai"
 CLAUDEAI = "claude"
 # model
 GPT35 = "gpt-3.5-turbo"
+GPT4 = "gpt-4"
 GPT4_TURBO_PREVIEW = "gpt-4-1106-preview"
 GPT4_VISION_PREVIEW = "gpt-4-vision-preview"
@@ -29,7 +29,7 @@
   "group_speech_recognition": false,
   "voice_reply_voice": false,
   "tts_voice_id": "alloy",
-  "conversation_max_tokens": 1000,
+  "conversation_max_tokens": 2500,
   "expires_in_seconds": 3600,
   "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
   "temperature": 0.7,
@@ -266,14 +266,16 @@ class Godcmd(Plugin):
                 if not isadmin and not self.is_admin_in_group(e_context["context"]):
                     ok, result = False, "需要管理员权限执行"
                 elif len(args) == 0:
-                    ok, result = True, "当前模型为: " + str(conf().get("model"))
+                    model = conf().get("model") or const.GPT35
+                    ok, result = True, "当前模型为: " + str(model)
                 elif len(args) == 1:
                     if args[0] not in const.MODEL_LIST:
                         ok, result = False, "模型名称不存在"
                     else:
                         conf()["model"] = self.model_mapping(args[0])
                         Bridge().reset_bot()
-                        ok, result = True, "模型设置为: " + str(conf().get("model"))
+                        model = conf().get("model") or const.GPT35
+                        ok, result = True, "模型设置为: " + str(model)
             elif cmd == "id":
                 ok, result = True, user
             elif cmd == "set_openai_api_key":
@@ -25,9 +25,12 @@ class LinkAIVoice(Voice):
             if not conf().get("text_to_voice") or conf().get("voice_to_text") == "openai":
                 model = const.WHISPER_1
             if voice_file.endswith(".amr"):
-                mp3_file = os.path.splitext(voice_file)[0] + ".mp3"
-                audio_convert.any_to_mp3(voice_file, mp3_file)
-                voice_file = mp3_file
+                try:
+                    mp3_file = os.path.splitext(voice_file)[0] + ".mp3"
+                    audio_convert.any_to_mp3(voice_file, mp3_file)
+                    voice_file = mp3_file
+                except Exception as e:
+                    logger.warn(f"[LinkVoice] amr file transfer failed, directly send amr voice file: {format(e)}")
             file = open(voice_file, "rb")
             file_body = {
                 "file": file
@@ -46,7 +49,7 @@ class LinkAIVoice(Voice):
             logger.info(f"[LinkVoice] voiceToText success, text={text}, file name={voice_file}")
         except Exception as e:
             logger.error(e)
-            reply = Reply(ReplyType.ERROR, "我暂时还无法听清您的语音,请稍后再试吧~")
+            return None
         return reply
 
     def textToVoice(self, text):
@@ -75,5 +78,5 @@ class LinkAIVoice(Voice):
                 return None
         except Exception as e:
             logger.error(e)
-            reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
-            return reply
+            # reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
+            return None