
Merge pull request #586 from fangpin/railway

Support Railway deployment
master
zhayujie GitHub 1 year ago
parent commit da18e3312a
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 97 additions and 5 deletions
  1. +14 -2 README.md
  2. +5 -1 app.py
  3. +5 -0 bot/bot_factory.py
  4. +55 -0 bot/chatgpt/chat_gpt_bot.py
  5. +2 -0 bridge/bridge.py
  6. +2 -1 common/const.py
  7. +1 -0 config-template.json
  8. +8 -1 config.py
  9. +5 -0 main.py
  10. +0 -0 requirements.txt

+14 -2 README.md

@@ -54,6 +54,9 @@

> The conversation model used in this project is davinci; billing is roughly $0.02 per 750 characters (request and reply combined), and image generation costs $0.016 per image. Newly created accounts come with a free $18 credit (update 3.25: newly registered accounts no longer receive the free credit); once it is used up, you can register again with a different email.

#### 1.1 ChatGPT Service on Azure
An alternative to the above is the [ChatGPT service](https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/) offered by Azure. It is hosted on Azure's public cloud, so it can be reached directly without a VPN, though the service is still in preview. New users can get some free usage for a while through "Try Azure for free".


### 2. Runtime Environment

@@ -94,7 +97,7 @@ pip3 install --upgrade openai
# Example contents of config.json
{
"open_ai_api_key": "YOUR API KEY", # 填入上面创建的 OpenAI API KEY
"model": "gpt-3.5-turbo", # 模型名称
"model": "gpt-3.5-turbo", # 模型名称。当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
"proxy": "127.0.0.1:7890", # 代理客户端的ip和端口
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
@@ -104,7 +107,8 @@ pip3 install --upgrade openai
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"conversation_max_tokens": 1000, # 支持上下文记忆的最多字符数
"speech_recognition": false, # 是否开启语音识别
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # 人格描述
"use_azure_chatgpt": false, # 是否使用Azure ChatGPT service代替openai ChatGPT service. 当设置为true时需要设置 open_ai_api_base,如 https://xxx.openai.azure.com/
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # 人格描述,
}
```
**Configuration notes:**
@@ -173,6 +177,14 @@ nohup python3 app.py & tail -f nohup.out # 在后台运行程序并通

See the [Docker Deployment](https://github.com/limccn/chatgpt-on-wechat/wiki/Docker%E9%83%A8%E7%BD%B2) guide (contributed by [limccn](https://github.com/limccn)).

### 4. Railway Deployment
[Use with Railway](#use-with-railway) (PaaS, free, stable, ✅ recommended)
> Railway offers $5 (500 hours) of runtime per month
1. Go to the [Railway](https://railway.app/) homepage.
2. Click the `Start New Project` button.
3. Click the `Deploy from Github repo` button.
4. Choose your repo (you can fork this repo first).
5. Set environment variables to override the settings in config-template.json, such as `model`, `open_ai_api_base`, `open_ai_api_key`, and `use_azure_chatgpt` (see the sketch below).
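
For illustration, these are the kind of variables you would add in Railway's Variables panel, written here as a Python sketch of the equivalent environment. The values are placeholders; the names must match the keys in config-template.json exactly, because config.py (further down in this diff) copies every environment variable into the loaded config.

```python
# Placeholder values only; set these in Railway's Variables panel rather than in code.
import os

os.environ["open_ai_api_key"] = "YOUR API KEY"
os.environ["model"] = "gpt-3.5-turbo"  # or the Azure model deployment name
os.environ["use_azure_chatgpt"] = "false"  # environment values arrive as strings
os.environ["open_ai_api_base"] = "https://xxx.openai.azure.com/"  # only needed for Azure
```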

## FAQ



+5 -1 app.py

@@ -5,7 +5,8 @@ from channel import channel_factory
from common.log import logger

from plugins import *
if __name__ == '__main__':

def run():
    try:
        # load config
        config.load_config()
@@ -21,3 +22,6 @@ if __name__ == '__main__':
    except Exception as e:
        logger.error("App startup failed!")
        logger.exception(e)

if __name__ == '__main__':
    run()

+5 -0 bot/bot_factory.py

@@ -24,4 +24,9 @@ def create_bot(bot_type):
        # OpenAI official conversation model API
        from bot.openai.open_ai_bot import OpenAIBot
        return OpenAIBot()

    elif bot_type == const.CHATGPTONAZURE:
        # Azure ChatGPT service  https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
        from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
        return AzureChatGPTBot()
    raise RuntimeError
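
For illustration, a minimal sketch of how the new branch is exercised; it assumes config.load_config() has already run so the bot's constructor can read its settings.

```python
# Minimal sketch (assumes config.load_config() has been called beforehand).
from common import const
from bot.bot_factory import create_bot

bot = create_bot(const.CHATGPTONAZURE)
print(type(bot).__name__)  # expected: "AzureChatGPTBot"
```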

+55 -0 bot/chatgpt/chat_gpt_bot.py

@@ -150,6 +150,61 @@ class ChatGPTBot(Bot):
            return False, str(e)


class AzureChatGPTBot(ChatGPTBot):
    def __init__(self):
        super().__init__()
        openai.api_type = "azure"
        openai.api_version = "2023-03-15-preview"

    def reply_text(self, session, session_id, retry_count=0) -> dict:
        '''
        call openai's ChatCompletion to get the answer
        :param session: a conversation session
        :param session_id: session id
        :param retry_count: retry count
        :return: {}
        '''
        try:
            if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
            response = openai.ChatCompletion.create(
                engine=conf().get("model") or "gpt-3.5-turbo",  # the model deployment name on Azure
                messages=session,
                temperature=conf().get('temperature', 0.9),  # value in [0,1]; higher values make replies less deterministic
                # max_tokens=4096,  # maximum number of characters in the reply
                top_p=1,
                frequency_penalty=conf().get('frequency_penalty', 0.0),  # value in [-2,2]; higher values favor more varied content
                presence_penalty=conf().get('presence_penalty', 0.0),  # value in [-2,2]; higher values favor more varied content
            )
            # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
            return {"total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": response.choices[0]['message']['content']}
        except openai.error.RateLimitError as e:
            # rate limit exception
            logger.warn(e)
            if retry_count < 1:
                time.sleep(5)
                logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
                return self.reply_text(session, session_id, retry_count+1)
            else:
                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
        except openai.error.APIConnectionError as e:
            # api connection exception
            logger.warn(e)
            logger.warn("[OPEN_AI] APIConnection failed")
            return {"completion_tokens": 0, "content": "我连接不到你的网络"}
        except openai.error.Timeout as e:
            logger.warn(e)
            logger.warn("[OPEN_AI] Timeout")
            return {"completion_tokens": 0, "content": "我没有收到你的消息"}
        except Exception as e:
            # unknown exception
            logger.exception(e)
            Session.clear_session(session_id)
            return {"completion_tokens": 0, "content": "请再问我一次吧"}


class SessionManager(object):
    def __init__(self):
        if conf().get('expires_in_seconds'):
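
For context, a standalone sketch of the Azure-flavoured call this class wraps, using the same openai 0.x SDK interface. The endpoint, key, and deployment name are placeholders; in the project itself the key and base URL are presumably supplied through the open_ai_api_key / open_ai_api_base settings rather than set inline like this.

```python
# Standalone sketch of an Azure OpenAI chat call with the openai 0.x SDK.
# Endpoint, key, and deployment name are placeholders, not project defaults.
import openai

openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
openai.api_base = "https://xxx.openai.azure.com/"  # your Azure OpenAI resource endpoint
openai.api_key = "YOUR AZURE OPENAI KEY"

response = openai.ChatCompletion.create(
    engine="my-gpt35-deployment",  # the model deployment name on Azure (the "model" config key)
    messages=[{"role": "user", "content": "Hello"}],
    temperature=0.9,
)
print(response.choices[0]["message"]["content"])
```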


+2 -0 bridge/bridge.py

@@ -19,6 +19,8 @@ class Bridge(object):
        model_type = conf().get("model")
        if model_type in ["text-davinci-003"]:
            self.btype['chat'] = const.OPEN_AI
        if conf().get("use_azure_chatgpt"):
            self.btype['chat'] = const.CHATGPTONAZURE
        self.bots={}

    def get_bot(self,typename):
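
The effect of the two added lines, shown as a simplified selection function; the const.CHATGPT default is an assumption about the surrounding code, not part of this diff.

```python
# Simplified sketch of the chat-bot type selection (not the actual Bridge class).
from config import conf
from common import const

def chat_bot_type():
    btype = const.CHATGPT  # assumed default, for illustration only
    if conf().get("model") in ["text-davinci-003"]:
        btype = const.OPEN_AI
    if conf().get("use_azure_chatgpt"):  # takes precedence over the model-based choice
        btype = const.CHATGPTONAZURE
    return btype
```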


+2 -1 common/const.py

@@ -1,4 +1,5 @@
# bot_type
OPEN_AI = "openAI"
CHATGPT = "chatGPT"
BAIDU = "baidu"
BAIDU = "baidu"
CHATGPTONAZURE = "chatGPTOnAzure"

+1 -0 config-template.json

@@ -2,6 +2,7 @@
"open_ai_api_key": "YOUR API KEY",
"model": "gpt-3.5-turbo",
"proxy": "",
"use_azure_chatgpt": false,
"single_chat_prefix": ["bot", "@bot"],
"single_chat_reply_prefix": "[bot] ",
"group_chat_prefix": ["@bot"],


+8 -1 config.py

@@ -10,11 +10,18 @@ def load_config():
    global config
    config_path = "./config.json"
    if not os.path.exists(config_path):
        raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件')
        logger.info('配置文件不存在,将使用config-template.json模板')
        config_path = "./config-template.json"

    config_str = read_file(config_path)
    # deserialize the JSON string into a dict
    config = json.loads(config_str)

    # override config with environment variables.
    # Some online deployment platforms (e.g. Railway) deploy the project directly from GitHub, so you shouldn't put secrets like the API key in a config file; use environment variables to override the default config instead.
    for name, value in os.environ.items():
        config[name] = value

    logger.info("[INIT] load config: {}".format(config))




+5 -0 main.py

@@ -0,0 +1,5 @@
# entry point for online railway deployment
from app import run

if __name__ == '__main__':
    run()

requirement.txt → requirements.txt

