@@ -65,7 +65,8 @@ def num_tokens_from_messages(messages, model):
     if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo", "gpt-3.5-turbo-1106"]:
         return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
     elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
-                   "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW]:
+                   "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
+                   "gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW]:
         return num_tokens_from_messages(messages, model="gpt-4")
     try:
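
For context, a minimal, self-contained sketch of the counting path this hunk extends. The per-message token overhead follows the OpenAI cookbook convention and the names here are illustrative rather than the project's exact code; the point is that the new preview model names are aliased to the base "gpt-4" tokenizer by recursion before tiktoken is consulted.

```python
import tiktoken

def num_tokens_from_messages(messages, model="gpt-4"):
    # Preview/snapshot names are aliased to the base model, mirroring the
    # elif branch above that now also matches "gpt-4-turbo-preview".
    if model in ("gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-0125-preview"):
        return num_tokens_from_messages(messages, model="gpt-4")
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")  # fallback encoding
    num_tokens = 0
    for message in messages:
        num_tokens += 3  # per-message overhead (role/content framing)
        for value in message.values():
            num_tokens += len(encoding.encode(str(value)))
    return num_tokens + 3  # replies are primed with an assistant header

# The preview name is routed to the base "gpt-4" encoding via the alias branch.
print(num_tokens_from_messages([{"role": "user", "content": "hello"}], "gpt-4-turbo-preview"))
```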
@@ -12,13 +12,14 @@ GEMINI = "gemini"
 # model
 GPT35 = "gpt-3.5-turbo"
 GPT4 = "gpt-4"
-GPT4_TURBO_PREVIEW = "gpt-4-1106-preview"
+GPT4_TURBO_PREVIEW = "gpt-4-0125-preview"
 GPT4_VISION_PREVIEW = "gpt-4-vision-preview"
 WHISPER_1 = "whisper-1"
 TTS_1 = "tts-1"
 TTS_1_HD = "tts-1-hd"
-MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "gpt-4-turbo", GPT4_TURBO_PREVIEW, QWEN, GEMINI]
+MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "gpt-4-turbo",
+              "gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, QWEN, GEMINI]
 # channel
 FEISHU = "feishu"
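
A quick illustration of how the widened MODEL_LIST plays out when a configured model name is checked against the whitelist. The validate_model helper is hypothetical and written only to show the gating effect, and the list below is trimmed to the OpenAI entries for brevity (GPT4_TURBO_PREVIEW resolves to "gpt-4-0125-preview" after this change).

```python
# Hypothetical helper, not part of this PR: shows how the expanded whitelist gates config values.
MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude",
              "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-0125-preview"]

def validate_model(name: str, default: str = "gpt-3.5-turbo") -> str:
    """Return the configured model if it is whitelisted, otherwise fall back to the default."""
    return name if name in MODEL_LIST else default

print(validate_model("gpt-4-turbo-preview"))  # accepted now that the preview names are listed
print(validate_model("gpt-4-unknown"))        # falls back to "gpt-3.5-turbo"
```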