From 04fec4a5854f63f10944f8cd464b75407aff647e Mon Sep 17 00:00:00 2001
From: Pin Fang
Date: Fri, 24 Mar 2023 23:41:53 +0800
Subject: [PATCH] Support Azure-hosted ChatGPT service

---
 README.md                   |  8 ++++--
 bot/bot_factory.py          |  5 ++++
 bot/chatgpt/chat_gpt_bot.py | 55 +++++++++++++++++++++++++++++++++++++
 bridge/bridge.py            |  2 ++
 common/const.py             |  3 +-
 config-template.json        |  6 ++--
 6 files changed, 73 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 4a773c9..28a08ea 100644
--- a/README.md
+++ b/README.md
@@ -56,6 +56,9 @@
 > The conversation model used in this project is davinci, billed at roughly $0.02 per 750 characters (request and reply combined); image generation costs $0.016 per image. A newly created account comes with a free $18 credit, and once it is used up you can register again with a different email address.
 
+#### 1.1 ChatGPT service on Azure
+An alternative to the above is the [ChatGPT service](https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/) offered by Azure. Because it is hosted on the Azure public cloud, it can be accessed directly without a VPN, although it is still in preview. New users can get a period of free usage through Try Azure for free.
+
 ### 2. Runtime environment
@@ -96,7 +99,7 @@ pip3 install --upgrade openai
 # Example of config.json contents
 {
   "open_ai_api_key": "YOUR API KEY",        # the OpenAI API key created above
-  "model": "gpt-3.5-turbo",                 # model name
+  "model": "gpt-3.5-turbo",                 # model name; when use_azure_chatgpt is true, this is the model deployment name on Azure
   "proxy": "127.0.0.1:7890",                # IP and port of the proxy client
   "single_chat_prefix": ["bot", "@bot"],    # in private chats, a message must contain one of these prefixes to trigger a bot reply
   "single_chat_reply_prefix": "[bot] ",     # prefix prepended to replies in private chats, to distinguish the bot from a real person
@@ -105,7 +108,8 @@ pip3 install --upgrade openai
   "image_create_prefix": ["画", "看", "找"],  # prefixes that trigger image generation
   "conversation_max_tokens": 1000,          # maximum number of tokens kept as conversation context
   "speech_recognition": false,              # whether to enable speech recognition
-  "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",  # personality description
+  "use_azure_chatgpt": false,               # whether to use the Azure ChatGPT service instead of the OpenAI one; when set to true, open_ai_api_base must also be set, e.g. https://xxx.openai.azure.com/
+  "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",  # personality description
 }
 ```
 **Configuration notes:**
diff --git a/bot/bot_factory.py b/bot/bot_factory.py
index a920524..06df336 100644
--- a/bot/bot_factory.py
+++ b/bot/bot_factory.py
@@ -24,4 +24,9 @@ def create_bot(bot_type):
         # OpenAI official conversation model API
         from bot.openai.open_ai_bot import OpenAIBot
         return OpenAIBot()
+
+    elif bot_type == const.CHATGPTONAZURE:
+        # Azure ChatGPT service: https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
+        from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
+        return AzureChatGPTBot()
     raise RuntimeError
diff --git a/bot/chatgpt/chat_gpt_bot.py b/bot/chatgpt/chat_gpt_bot.py
index a0b4bbc..52ab400 100644
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -150,6 +150,61 @@ class ChatGPTBot(Bot):
             return False, str(e)
 
 
+class AzureChatGPTBot(ChatGPTBot):
+    def __init__(self):
+        super().__init__()
+        openai.api_type = "azure"
+        openai.api_version = "2023-03-15-preview"
+
+    def reply_text(self, session, session_id, retry_count=0) -> dict:
+        '''
+        Call OpenAI's ChatCompletion to get the answer
+        :param session: a conversation session
+        :param session_id: session id
+        :param retry_count: retry count
+        :return: {}
+        '''
+        try:
+            if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
+                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
+            response = openai.ChatCompletion.create(
+                engine=conf().get("model") or "gpt-3.5-turbo",  # the model deployment name on Azure
+                messages=session,
+                temperature=conf().get('temperature', 0.9),  # value in [0, 1]; higher values make replies less deterministic
+                # max_tokens=4096,  # maximum number of tokens in the reply
+                top_p=1,
+                frequency_penalty=conf().get('frequency_penalty', 0.0),  # value in [-2, 2]; higher values push the model toward new content
+                presence_penalty=conf().get('presence_penalty', 0.0),  # value in [-2, 2]; higher values push the model toward new content
+            )
+            # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
+            return {"total_tokens": response["usage"]["total_tokens"],
+                    "completion_tokens": response["usage"]["completion_tokens"],
+                    "content": response.choices[0]['message']['content']}
+        except openai.error.RateLimitError as e:
+            # rate limit exception
+            logger.warn(e)
+            if retry_count < 1:
+                time.sleep(5)
+                logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
+                return self.reply_text(session, session_id, retry_count+1)
+            else:
+                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
+        except openai.error.APIConnectionError as e:
+            # api connection exception
+            logger.warn(e)
+            logger.warn("[OPEN_AI] APIConnection failed")
+            return {"completion_tokens": 0, "content": "我连接不到你的网络"}
+        except openai.error.Timeout as e:
+            logger.warn(e)
+            logger.warn("[OPEN_AI] Timeout")
+            return {"completion_tokens": 0, "content": "我没有收到你的消息"}
+        except Exception as e:
+            # unknown exception
+            logger.exception(e)
+            Session.clear_session(session_id)
+            return {"completion_tokens": 0, "content": "请再问我一次吧"}
+
+
 class SessionManager(object):
     def __init__(self):
         if conf().get('expires_in_seconds'):
diff --git a/bridge/bridge.py b/bridge/bridge.py
index a439e1b..15a6d02 100644
--- a/bridge/bridge.py
+++ b/bridge/bridge.py
@@ -19,6 +19,8 @@ class Bridge(object):
         model_type = conf().get("model")
         if model_type in ["text-davinci-003"]:
             self.btype['chat'] = const.OPEN_AI
+        if conf().get("use_azure_chatgpt"):
+            self.btype['chat'] = const.CHATGPTONAZURE
         self.bots={}
 
     def get_bot(self,typename):
diff --git a/common/const.py b/common/const.py
index 37f2dbd..8336da3 100644
--- a/common/const.py
+++ b/common/const.py
@@ -1,4 +1,5 @@
 # bot_type
 OPEN_AI = "openAI"
 CHATGPT = "chatGPT"
-BAIDU = "baidu"
\ No newline at end of file
+BAIDU = "baidu"
+CHATGPTONAZURE = "chatGPTOnAzure"
\ No newline at end of file
diff --git a/config-template.json b/config-template.json
index 902f290..1fd8c11 100644
--- a/config-template.json
+++ b/config-template.json
@@ -11,6 +11,6 @@
   "voice_reply_voice": false,
   "conversation_max_tokens": 1000,
   "expires_in_seconds": 3600,
-  "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"
-}
-
+  "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
+  "use_azure_chatgpt": false
+}
\ No newline at end of file
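
A minimal, standalone sketch (not part of the patch itself) of the Azure OpenAI settings this change relies on, assuming the pre-1.0 `openai` Python SDK that provides the `openai.ChatCompletion` interface used above. The endpoint, key, and deployment name below are placeholders; in the bot, the key and base URL are expected to come from `open_ai_api_key` and `open_ai_api_base` in config.json, and the deployment name from the `model` entry:

```python
import openai

# Azure OpenAI settings; the values below are placeholders.
openai.api_type = "azure"
openai.api_base = "https://xxx.openai.azure.com/"  # corresponds to open_ai_api_base
openai.api_key = "YOUR_AZURE_OPENAI_KEY"           # corresponds to open_ai_api_key
openai.api_version = "2023-03-15-preview"          # same preview API version as AzureChatGPTBot

# On Azure, `engine` is the deployment name, which is what the "model"
# config entry should contain when use_azure_chatgpt is true.
response = openai.ChatCompletion.create(
    engine="my-gpt-35-turbo-deployment",
    messages=[{"role": "user", "content": "ping"}],
    temperature=0.9,
)
print(response.choices[0]["message"]["content"])
```

If this call succeeds, setting `use_azure_chatgpt` to true with the same values should route the bot's chat requests through `AzureChatGPTBot` via the bridge and bot factory changes above.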