@@ -53,7 +53,7 @@ available_setting = {
     "voice_reply_voice": False,  # whether to reply to voice messages with voice; requires the API key of the corresponding speech synthesis engine
     "always_reply_voice": False,  # whether to always reply with voice
     "voice_to_text": "openai",  # speech recognition engine; supports openai, baidu, google, azure
-    "text_to_voice": "baidu",  # speech synthesis engine; supports baidu, google, pytts (offline), azure, eleven
+    "text_to_voice": "baidu",  # speech synthesis engine; supports baidu, google, pytts (offline), azure
     # Baidu voice API configuration, required when using Baidu speech recognition and synthesis
     "baidu_app_id": "",
     "baidu_api_key": "",
@@ -9,7 +9,6 @@ baidu_aip>=4.16.10 # baidu voice
 azure-cognitiveservices-speech # azure voice
 numpy<=1.24.2
 langid # language detect
-elevenlabs==0.2.15
 #install plugin
 dulwich
@@ -1,32 +0,0 @@
-"""
-eleventLabs voice service
-"""
-import time
-from elevenlabs import generate
-from bridge.reply import Reply, ReplyType
-from common.log import logger
-from common.tmp_dir import TmpDir
-from voice.voice import Voice
-class ElevenLabsVoice(Voice):
-    def __init__(self):
-        pass
-    def voiceToText(self, voice_file):
-        pass
-    def textToVoice(self, text):
-        audio = generate(
-            text=text
-        )
-        fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
-        with open(fileName, "wb") as f:
-            f.write(audio)
-        logger.info("[ElevenLabs] textToVoice text={} voice file name={}".format(text, fileName))
-        return Reply(ReplyType.VOICE, fileName)
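
For anyone who still needs ElevenLabs synthesis after this removal, the deleted class above boils down to a single call into the old module-level API of `elevenlabs==0.2.15`. A standalone sketch of that usage, assuming the pinned 0.2.x package is installed and ElevenLabs credentials are already configured for it (the `text_to_mp3` helper name is illustrative):

```python
# Standalone sketch based on the removed ElevenLabsVoice.textToVoice above.
# Assumes elevenlabs==0.2.15 (old module-level API) and configured credentials.
import time

from elevenlabs import generate  # same import the deleted file used

def text_to_mp3(text: str, out_dir: str = "./") -> str:
    audio = generate(text=text)  # returns raw audio bytes (MP3)
    file_name = out_dir + "reply-" + str(int(time.time())) + ".mp3"
    with open(file_name, "wb") as f:
        f.write(audio)
    return file_name
```
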
@@ -29,8 +29,4 @@ def create_voice(voice_type):
         from voice.azure.azure_voice import AzureVoice
         return AzureVoice()
-    elif voice_type == "eleven":
-        from voice.elevent.elevent_voice import ElevenLabsVoice
-        return ElevenLabsVoice()
     raise RuntimeError
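
The factory keeps its shape after the removal: each engine is an `elif` branch that lazily imports its implementation. A minimal sketch of how a locally maintained engine could be re-added by mirroring the deleted branch; `"my_engine"`, the `voice.my_engine.my_voice` module path, and `MyVoice` are hypothetical names, not part of the project:

```python
# Illustrative factory fragment mirroring the pattern shown in the diff above.
# "my_engine", voice.my_engine.my_voice, and MyVoice are hypothetical names.
def create_voice(voice_type):
    if voice_type == "azure":
        from voice.azure.azure_voice import AzureVoice
        return AzureVoice()
    elif voice_type == "my_engine":
        from voice.my_engine.my_voice import MyVoice
        return MyVoice()
    raise RuntimeError("unknown voice_type: {}".format(voice_type))
```
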