diff --git a/bot/chatgpt/chat_gpt_session.py b/bot/chatgpt/chat_gpt_session.py
index 3c2657c..299ae19 100644
--- a/bot/chatgpt/chat_gpt_session.py
+++ b/bot/chatgpt/chat_gpt_session.py
@@ -59,7 +59,7 @@ def num_tokens_from_messages(messages, model):
     if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo"]:
         return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
-    elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613"]:
+    elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k"]:
         return num_tokens_from_messages(messages, model="gpt-4")
     try:
diff --git a/channel/wechat/wechat_channel.py b/channel/wechat/wechat_channel.py
index 69f56f3..80746d2 100644
--- a/channel/wechat/wechat_channel.py
+++ b/channel/wechat/wechat_channel.py
@@ -159,7 +159,7 @@ class WechatChannel(ChatChannel):
     @_check
     def handle_group(self, cmsg: ChatMessage):
         if cmsg.ctype == ContextType.VOICE:
-            if conf().get("speech_recognition") != True:
+            if conf().get("group_speech_recognition") != True:
                 return
             logger.debug("[WX]receive voice for group msg: {}".format(cmsg.content))
         elif cmsg.ctype == ContextType.IMAGE: