
Merge pull request #629 from Chiaki-Chan/master

Add responses to group voice messages under the ItChat-uos approach
master
lanvent, 1 year ago
parent commit: 74f383a7d4
3 files changed, 23 insertions and 13 deletions
  1. README.md (+3, −3)
  2. app.py (+1, −1)
  3. channel/wechat/wechat_channel.py (+19, −9)

README.md (+3, −3)

@@ -112,7 +112,7 @@ pip3 install --upgrade tiktoken
  "image_create_prefix": ["画", "看", "找"], # prefixes that switch a reply to image generation
  "conversation_max_tokens": 1000, # maximum number of characters kept as conversation memory
  "speech_recognition": false, # whether to enable speech recognition
- "group_speech_recognition": false, # whether to enable group speech recognition (currently wechaty only)
+ "group_speech_recognition": false, # whether to enable group speech recognition
  "use_azure_chatgpt": false, # whether to use the Azure ChatGPT service instead of OpenAI's; when true, open_ai_api_base must be set, e.g. https://xxx.openai.azure.com/
  "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # persona description
}
@@ -133,8 +133,8 @@ pip3 install --upgrade tiktoken

**3. Speech recognition**

-+ Adding `"speech_recognition": true` turns on speech recognition. By default OpenAI's whisper model transcribes the voice message to text and the bot replies in text. This option only applies to private chats (note that voice messages cannot match a prefix, so once enabled every voice message is answered automatically);
-+ Adding `"group_speech_recognition": true` turns on group speech recognition. By default OpenAI's whisper model transcribes the voice message to text and the bot replies in text. This option only applies to group chats (group_chat_prefix and group_chat_keyword can be matched; currently wechaty supports triggering image generation by voice in groups, itchat does not);
++ Adding `"speech_recognition": true` turns on speech recognition. By default OpenAI's whisper model transcribes the voice message to text and the bot replies in text. This option only applies to private chats (note that voice messages cannot match a prefix, so once enabled every voice message is answered automatically; voice can also trigger image generation);
++ Adding `"group_speech_recognition": true` turns on group speech recognition. By default OpenAI's whisper model transcribes the voice message to text and the bot replies in text. This option only applies to group chats (group_chat_prefix and group_chat_keyword are matched, and voice can trigger image generation);
 + Adding `"voice_reply_voice": true` makes the bot reply with voice as well (in both private and group chats), but it requires the API key of the corresponding speech-synthesis platform. Due to a limitation of the itchat protocol, only an mp3 voice file can be sent; with wechaty the reply is a native WeChat voice message.

**4. Other settings**


app.py (+1, −1)

@@ -24,4 +24,4 @@ def run():
        logger.exception(e)

if __name__ == '__main__':
-    run()
+    run()

channel/wechat/wechat_channel.py (+19, −9)

@@ -190,14 +190,14 @@ class WechatChannel(Channel):
        thread_pool.submit(self.handle, context).add_done_callback(thread_pool_callback)

    def handle_group_voice(self, msg):
-        if conf().get('group_speech_recognition') != True:
+        if conf().get('group_speech_recognition', False) != True:
            return
        logger.debug("[WX]receive voice for group msg: " + msg['FileName'])
        group_name = msg['User'].get('NickName', None)
        group_id = msg['User'].get('UserName', None)
        create_time = msg['CreateTime']  # message timestamp
        if conf().get('hot_reload') == True and int(create_time) < int(time.time()) - 60:  # skip history messages older than one minute
-            logger.debug("[WX]history group message skipped")
+            logger.debug("[WX]history group voice skipped")
            return
        # validate the group name
        if not group_name:
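
For context, itchat only delivers group voice messages to handlers registered with isGroupChat=True. A minimal sketch of how handle_group_voice might be wired up, assuming a WechatChannel instance named channel; the decorator is itchat's real registration API, but this wiring is illustrative rather than the repository's actual startup code:

import itchat
from itchat.content import VOICE
from channel.wechat.wechat_channel import WechatChannel

channel = WechatChannel()  # hypothetical instance for illustration

@itchat.msg_register(VOICE, isGroupChat=True)
def on_group_voice(msg):
    # delegate group voice messages to the handler shown above
    channel.handle_group_voice(msg)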
@@ -260,19 +260,29 @@ class WechatChannel(Channel):
            file_name = TmpDir().path() + context.content
            msg.download(file_name)
            reply = super().build_voice_to_text(file_name)
-            if reply.type != ReplyType.ERROR and reply.type != ReplyType.INFO:
-                context.content = reply.content  # after voice-to-text, treat the transcription as the new context
+            if reply.type == ReplyType.TEXT:
+                content = reply.content  # after voice-to-text, treat the transcription as the new context
                # for group messages, check whether a trigger prefix or keyword matched
                if context['isgroup']:
-                    match_prefix = check_prefix(context.content, conf().get('group_chat_prefix')) or check_contain(context.content, conf().get('group_chat_keyword'))
-                    if match_prefix != True:
+                    match_prefix = check_prefix(content, conf().get('group_chat_prefix'))
+                    match_contain = check_contain(content, conf().get('group_chat_keyword'))
+                    logger.debug('[WX] group chat prefix match: {}'.format(match_prefix))
+                    if match_prefix is None and match_contain is None:
                        return
-                context.type = ContextType.TEXT
+                    else:
+                        if match_prefix:
+                            content = content.replace(match_prefix, '', 1).strip()
+                img_match_prefix = check_prefix(content, conf().get('image_create_prefix'))
+                if img_match_prefix:
+                    content = content.replace(img_match_prefix, '', 1).strip()
+                    context.type = ContextType.IMAGE_CREATE
+                else:
+                    context.type = ContextType.TEXT
+                context.content = content
                reply = super().build_reply_content(context.content, context)
                if reply.type == ReplyType.TEXT:
                    if conf().get('voice_reply_voice'):
                        if context['isgroup']:
                            reply.content = '@' + context['msg']['ActualNickName'] + ' ' + reply.content
                        reply = super().build_text_to_voice(reply.content)
        else:
            logger.error('[WX] unknown context type: {}'.format(context.type))
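
The switch from the old match_prefix != True test to explicit is None checks reflects the return conventions of the matching helpers: check_prefix returns the matched prefix string itself (so it can be stripped from the content with replace(match_prefix, '', 1)), while check_contain only signals whether any keyword occurred. A minimal sketch of that contract, assuming both config values are lists — an illustration of the semantics the diff relies on, not the repository's exact code:

def check_prefix(content, prefix_list):
    # return the first prefix the content starts with, else None
    for prefix in prefix_list or []:
        if content.startswith(prefix):
            return prefix
    return None

def check_contain(content, keyword_list):
    # return True if any keyword appears in the content, else None
    for keyword in keyword_list or []:
        if keyword in content:
            return True
    return None

Under this contract a truthy match_prefix is always a non-empty string, which is why the new code can both test it and reuse it as the exact text to strip.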

