@@ -19,7 +19,7 @@ class Bridge(object):
         model_type = conf().get("model")
         if model_type in ["text-davinci-003"]:
             self.btype['chat'] = const.OPEN_AI
-        if conf().get("use_azure_chatgpt"):
+        if conf().get("use_azure_chatgpt", False):
             self.btype['chat'] = const.CHATGPTONAZURE
         self.bots={}
@@ -147,6 +147,8 @@ class WechatChannel(ChatChannel):
             if conf().get('speech_recognition') != True:
                 return
             logger.debug("[WX]receive voice for group msg: {}".format(cmsg.content))
+        elif cmsg.ctype == ContextType.IMAGE:
+            logger.debug("[WX]receive image for group msg: {}".format(cmsg.content))
         else:
             # logger.debug("[WX]receive group msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
             pass
@@ -2,7 +2,6 @@
   "open_ai_api_key": "YOUR API KEY",
   "model": "gpt-3.5-turbo",
   "proxy": "",
-  "use_azure_chatgpt": false,
   "single_chat_prefix": ["bot", "@bot"],
   "single_chat_reply_prefix": "[bot] ",
   "group_chat_prefix": ["@bot"],
@@ -21,4 +21,4 @@ web.py
 # chatgpt-tool-hub plugin
 --extra-index-url https://pypi.python.org/simple
-chatgpt_tool_hub>=0.3.5
+chatgpt_tool_hub>=0.3.7
@@ -1,4 +1,4 @@
-openai>=0.27.2
+openai==0.27.2
 HTMLParser>=0.0.2
 PyQRCode>=1.2.1
 qrcode>=7.4.2