
feat: add options to set voice bot

master
lanvent, 1 year ago
parent
commit
cd86801eac
3 changed files with 100 additions and 59 deletions
  1. bot/chatgpt/chat_gpt_bot.py (+16, -54)
  2. bridge/bridge.py (+2, -2)
  3. config.py (+82, -3)

bot/chatgpt/chat_gpt_bot.py (+16, -54)

@@ -76,6 +76,16 @@ class ChatGPTBot(Bot):
                 reply = Reply(ReplyType.ERROR, 'Bot不支持处理{}类型的消息'.format(context.type))
                 return reply
 
+    def compose_args(self):
+        return {
+            "model": conf().get("model") or "gpt-3.5-turbo",  # name of the chat model
+            "temperature": conf().get('temperature', 0.9),  # in [0,1]; larger values make replies more random
+            # "max_tokens": 4096,  # maximum number of characters in the reply
+            "top_p": 1,
+            "frequency_penalty": conf().get('frequency_penalty', 0.0),  # in [-2,2]; larger values favor producing different content
+            "presence_penalty": conf().get('presence_penalty', 0.0),  # in [-2,2]; larger values favor producing different content
+        }
+
     def reply_text(self, session, session_id, retry_count=0) -> dict:
         '''
         call openai's ChatCompletion to get the answer
@@ -88,13 +98,7 @@ class ChatGPTBot(Bot):
             if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
                 return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
             response = openai.ChatCompletion.create(
-                model=conf().get("model") or "gpt-3.5-turbo",  # name of the chat model
-                messages=session,
-                temperature=conf().get('temperature', 0.9),  # in [0,1]; larger values make replies more random
-                # max_tokens=4096,  # maximum number of characters in the reply
-                top_p=1,
-                frequency_penalty=conf().get('frequency_penalty', 0.0),  # in [-2,2]; larger values favor producing different content
-                presence_penalty=conf().get('presence_penalty', 0.0),  # in [-2,2]; larger values favor producing different content
+                messages=session, **self.compose_args()
             )
             # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
             return {"total_tokens": response["usage"]["total_tokens"],
@@ -156,53 +160,11 @@ class AzureChatGPTBot(ChatGPTBot):
         openai.api_type = "azure"
         openai.api_version = "2023-03-15-preview"
 
-    def reply_text(self, session, session_id, retry_count=0) -> dict:
-        '''
-        call openai's ChatCompletion to get the answer
-        :param session: a conversation session
-        :param session_id: session id
-        :param retry_count: retry count
-        :return: {}
-        '''
-        try:
-            if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
-                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
-            response = openai.ChatCompletion.create(
-                engine=conf().get("model") or "gpt-3.5-turbo",  # the model deployment name on Azure
-                messages=session,
-                temperature=conf().get('temperature', 0.9),  # in [0,1]; larger values make replies more random
-                # max_tokens=4096,  # maximum number of characters in the reply
-                top_p=1,
-                frequency_penalty=conf().get('frequency_penalty', 0.0),  # in [-2,2]; larger values favor producing different content
-                presence_penalty=conf().get('presence_penalty', 0.0),  # in [-2,2]; larger values favor producing different content
-            )
-            # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
-            return {"total_tokens": response["usage"]["total_tokens"],
-                    "completion_tokens": response["usage"]["completion_tokens"],
-                    "content": response.choices[0]['message']['content']}
-        except openai.error.RateLimitError as e:
-            # rate limit exception
-            logger.warn(e)
-            if retry_count < 1:
-                time.sleep(5)
-                logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count + 1))
-                return self.reply_text(session, session_id, retry_count + 1)
-            else:
-                return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
-        except openai.error.APIConnectionError as e:
-            # api connection exception
-            logger.warn(e)
-            logger.warn("[OPEN_AI] APIConnection failed")
-            return {"completion_tokens": 0, "content": "我连接不到你的网络"}
-        except openai.error.Timeout as e:
-            logger.warn(e)
-            logger.warn("[OPEN_AI] Timeout")
-            return {"completion_tokens": 0, "content": "我没有收到你的消息"}
-        except Exception as e:
-            # unknown exception
-            logger.exception(e)
-            Session.clear_session(session_id)
-            return {"completion_tokens": 0, "content": "请再问我一次吧"}
+    def compose_args(self):
+        args = super().compose_args()
+        args["engine"] = args["model"]
+        del(args["model"])
+        return args
 
 
 class SessionManager(object):

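Note: the change above centralizes the shared ChatCompletion keyword arguments in compose_args(), so AzureChatGPTBot only has to swap "model" for "engine". A minimal sketch of that override pattern, using simplified stand-in classes rather than the project's real ones:

class BaseBot:
    def compose_args(self):
        # shared ChatCompletion keyword arguments built once in the base class
        return {"model": "gpt-3.5-turbo", "temperature": 0.9, "top_p": 1}

class AzureBot(BaseBot):
    def compose_args(self):
        args = super().compose_args()
        args["engine"] = args.pop("model")  # Azure identifies the deployment by "engine" rather than "model"
        return args

print(AzureBot().compose_args())  # {'temperature': 0.9, 'top_p': 1, 'engine': 'gpt-3.5-turbo'}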

bridge/bridge.py (+2, -2)

@@ -13,8 +13,8 @@ class Bridge(object):
     def __init__(self):
         self.btype = {
             "chat": const.CHATGPT,
-            "voice_to_text": "openai",
-            "text_to_voice": "baidu"
+            "voice_to_text": conf().get("voice_to_text", "openai"),
+            "text_to_voice": conf().get("text_to_voice", "baidu")
         }
         model_type = conf().get("model")
         if model_type in ["text-davinci-003"]:

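Note: with this change the speech engines are read from the config and fall back to the previous hard-coded defaults when the new keys are absent. A small sketch of that lookup, with conf() replaced by a plain dict for illustration:

def resolve_voice_engines(settings: dict) -> dict:
    # config value if present, otherwise the previous hard-coded default
    return {
        "voice_to_text": settings.get("voice_to_text", "openai"),
        "text_to_voice": settings.get("text_to_voice", "baidu"),
    }

print(resolve_voice_engines({}))                           # {'voice_to_text': 'openai', 'text_to_voice': 'baidu'}
print(resolve_voice_engines({"text_to_voice": "google"}))  # {'voice_to_text': 'openai', 'text_to_voice': 'google'}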

config.py (+82, -3)

@@ -4,7 +4,84 @@ import json
 import os
 from common.log import logger
 
-config = {}
+# All available configuration options are listed in this dict
+available_setting = {
+    # openai api settings
+    "open_ai_api_key": "",  # openai api key
+    "open_ai_api_base": "https://api.openai.com/v1",  # openai api base; when use_azure_chatgpt is true, set this to the corresponding api base
+    "proxy": "",  # proxy used for openai requests
+    "model": "gpt-3.5-turbo",  # chatgpt model; when use_azure_chatgpt is true, this is the model deployment name on Azure
+    "use_azure_chatgpt": False,  # whether to use Azure's chatgpt
+
+    # bot trigger settings
+    "single_chat_prefix": ["bot", "@bot"],  # in private chats, the text must contain one of these prefixes to trigger a bot reply
+    "single_chat_reply_prefix": "[bot] ",  # prefix prepended to auto replies in private chats, to distinguish the bot from a real person
+    "group_chat_prefix": ["@bot"],  # in group chats, messages containing one of these prefixes trigger a bot reply
+    "group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"],  # list of group names where auto reply is enabled
+    "group_chat_in_one_session": ["ChatGPT测试群"],  # groups that share one conversation context
+    "image_create_prefix": ["画", "看", "找"],  # prefixes that trigger image replies
+
+    # chatgpt session settings
+    "expires_in_seconds": 3600,  # expiration time of an idle session
+    "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",  # persona description
+    "conversation_max_tokens": 1000,  # maximum number of characters kept as conversation context
+
+    # chatgpt rate limit settings
+    "rate_limit_chatgpt": 20,  # rate limit for chatgpt calls
+    "rate_limit_dalle": 50,  # rate limit for openai dalle calls
+
+    # chatgpt api parameters, see https://platform.openai.com/docs/api-reference/chat/create
+    "temperature": 0.9,
+    "top_p": 1,
+    "frequency_penalty": 0,
+    "presence_penalty": 0,
+
+    # voice settings
+    "speech_recognition": False,  # whether to enable speech recognition
+    "voice_reply_voice": False,  # whether to reply to voice messages with voice; requires the api key of the chosen speech synthesis engine
+    "voice_to_text": "openai",  # speech recognition engine; supports openai and google
+    "text_to_voice": "baidu",  # speech synthesis engine; supports baidu and google
+
+    # baidu api settings, required when using Baidu speech recognition or synthesis
+    'baidu_app_id': "",
+    'baidu_api_key': "",
+    'baidu_secret_key': "",
+
+    # service time restriction, currently only supported with itchat
+    "chat_time_module": False,  # whether to enable the service time restriction
+    "chat_start_time": "00:00",  # service start time
+    "chat_stop_time": "24:00",  # service stop time
+
+    # itchat settings
+    "hot_reload": False,  # whether to enable hot reload
+
+    # custom chatgpt command trigger words
+    "clear_memory_commands": ['#清除记忆'],  # commands that reset the session
+}
+
+
+class Config(dict):
+    def __getitem__(self, key):
+        if key not in available_setting:
+            raise Exception("key {} not in available_setting".format(key))
+        return super().__getitem__(key)
+
+    def __setitem__(self, key, value):
+        if key not in available_setting:
+            raise Exception("key {} not in available_setting".format(key))
+        return super().__setitem__(key, value)
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError as e:
+            return default
+        except Exception as e:
+            raise e
+
+
+config = Config()
 
 def load_config():
     global config
@@ -15,12 +92,14 @@ def load_config():
 
     config_str = read_file(config_path)
     # deserialize the json string into a dict
-    config = json.loads(config_str)
+    config = Config(json.loads(config_str))
 
     # override config with environment variables.
     # Some online deployment platforms (e.g. Railway) deploy the project from GitHub directly, so you shouldn't put secrets such as the api key in a config file; use environment variables to override the default config instead.
     for name, value in os.environ.items():
-        config[name] = value
+        if name in available_setting:
+            logger.info("[INIT] override config by environ args: {}={}".format(name, value))
+            config[name] = value
 
     logger.info("[INIT] load config: {}".format(config))

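Note: the new Config class rejects any key not declared in available_setting, while get() still returns a default for declared-but-unset keys. A short usage sketch, assuming Config and available_setting are imported from config.py as defined above:

cfg = Config()
cfg["model"] = "gpt-4"               # accepted: "model" is declared in available_setting
print(cfg.get("proxy", "no-proxy"))  # prints "no-proxy": declared key, but not set on this instance
try:
    cfg["some_unknown_key"] = 1      # rejected: hypothetical key not declared in available_setting
except Exception as e:
    print(e)                         # key some_unknown_key not in available_setting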

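Note: load_config() now copies only environment variables whose names appear in available_setting, instead of every variable in the environment. A sketch of that filter, with os.environ simulated by a plain dict and hypothetical values:

fake_environ = {"PATH": "/usr/bin", "model": "gpt-4", "proxy": "http://127.0.0.1:7890"}
overrides = {name: value for name, value in fake_environ.items() if name in available_setting}
print(overrides)  # {'model': 'gpt-4', 'proxy': 'http://127.0.0.1:7890'}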
