@@ -31,7 +31,7 @@ class Bridge(object):
             self.btype["chat"] = const.LINKAI
             if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]:
                 self.btype["voice_to_text"] = const.LINKAI
-            if not conf().get("text_to_voice") or conf().get("text_to_voice") in [const.TTS_1, const.TTS_1_HD]:
+            if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
                 self.btype["text_to_voice"] = const.LINKAI
         if model_type in ["claude"]:
             self.btype["chat"] = const.CLAUDEAI
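The only change in this hunk is the extra "openai" entry in the membership check, so a config that selects the new openai TTS engine still routes text_to_voice through the LinkAI bridge. A minimal standalone sketch of that branch follows; the conf() stub and the const values are assumptions made for illustration, not the project's real implementations.

    # Sketch of the routing condition above; conf() and const are stand-ins
    # for this example only.
    class const:
        LINKAI = "linkai"
        TTS_1 = "tts-1"
        TTS_1_HD = "tts-1-hd"

    def conf():
        return {"text_to_voice": "openai"}  # hypothetical config value

    btype = {}
    if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
        btype["text_to_voice"] = const.LINKAI  # "openai" now also takes this branch

    print(btype)  # {'text_to_voice': 'linkai'}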
@@ -73,7 +73,8 @@ available_setting = {
     "voice_reply_voice": False,  # whether to reply to voice messages with voice; requires the API key of the matching TTS engine
     "always_reply_voice": False,  # whether to always reply with voice
     "voice_to_text": "openai",  # speech recognition engine; supports openai, baidu, google, azure
-    "text_to_voice": "tts-1",  # text-to-speech engine; supports tts-1, tts-1-hd, baidu, google, pytts (offline), azure, elevenlabs
+    "text_to_voice": "openai",  # text-to-speech engine; supports openai, baidu, google, pytts (offline), azure, elevenlabs
+    "text_to_voice_model": "tts-1",  # model used by the openai TTS engine, e.g. tts-1 or tts-1-hd
     "tts_voice_id": "alloy",
     # Baidu voice API settings; required when using Baidu speech recognition or synthesis
     "baidu_app_id": "",
@@ -53,8 +53,8 @@ class LinkAIVoice(Voice):
             url = conf().get("linkai_api_base", "https://api.link-ai.chat") + "/v1/audio/speech"
             headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
             model = const.TTS_1
-            if not conf().get("text_to_voice") or conf().get("text_to_voice") in [const.TTS_1, const.TTS_1_HD]:
-                model = conf().get("text_to_voice") or const.TTS_1
+            if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
+                model = conf().get("text_to_voice_model") or const.TTS_1
             data = {
                 "model": model,
                 "input": text,
@@ -9,7 +9,9 @@ from bridge.reply import Reply, ReplyType
 from common.log import logger
 from config import conf
 from voice.voice import Voice
 import requests
+from common import const
+import datetime, random

 class OpenaiVoice(Voice):
     def __init__(self):
@@ -27,3 +29,28 @@ class OpenaiVoice(Voice):
             reply = Reply(ReplyType.ERROR, "我暂时还无法听清您的语音,请稍后再试吧~")
         finally:
             return reply
+
+    def textToVoice(self, text):
+        try:
+            url = 'https://api.openai.com/v1/audio/speech'
+            headers = {
+                'Authorization': 'Bearer ' + conf().get("open_ai_api_key"),
+                'Content-Type': 'application/json'
+            }
+            data = {
+                'model': conf().get("text_to_voice_model") or const.TTS_1,
+                'input': text,
+                'voice': conf().get("tts_voice_id") or "alloy"
+            }
+            response = requests.post(url, headers=headers, json=data)
+            file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
+            logger.debug(f"[OPENAI] textToVoice file_name={file_name}, input={text}")
+            with open(file_name, 'wb') as f:
+                f.write(response.content)
+            logger.info("[OPENAI] textToVoice success")
+            reply = Reply(ReplyType.VOICE, file_name)
+        except Exception as e:
+            logger.error(e)
+            reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
+        return reply
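For reference, a self-contained sketch of the same OpenAI speech request that the new textToVoice method issues; reading the key from an environment variable and writing to speech.mp3 are assumptions of this example, not the project's behavior.

    # Standalone sketch of the TTS call made by textToVoice above.
    import os
    import requests

    resp = requests.post(
        "https://api.openai.com/v1/audio/speech",
        headers={"Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]},
        json={"model": "tts-1", "input": "hello, this is a test", "voice": "alloy"},
    )
    resp.raise_for_status()  # surface HTTP errors instead of writing an error body to disk
    with open("speech.mp3", "wb") as f:
        f.write(resp.content)  # binary mp3 audio returned by the endpoint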