- import json
- import os
- import xml.etree.ElementTree as ET
-
- import requests
- from flask import jsonify, request
- from flask_restful import Resource, reqparse
-
- from common import kafka_helper, memory, redis_helper, utils
- from common.log import logger
- from voice import audio_convert
- from voice.ali.ali_voice import AliVoice
- from wechat import gewe_chat
-
-
- class MessagesResource(Resource):
- def __init__(self):
- self.parser = reqparse.RequestParser()
-
- def post(self):
- """处理微信回调的POST请求"""
- msg = request.get_json()
- if 'Data' not in msg:
- return jsonify({"message": "无效的数据格式"}), 400 # 如果数据格式不正确,返回错误信息
-
- msg_data = msg.get("Data")
- msg_type = msg_data.get("MsgType")
-
- # Dispatch to the handler for this message type
- if msg_type == 1:  # text message
- return self.handle_text_message(msg)
- elif msg_type == 3:  # image message
- return self.handle_image_message(msg)
- elif msg_type == 34:  # voice message
- return self.handle_voice_message(msg)
- elif msg_type == 49:  # app message (quoted reply)
- return self.handle_replied_message(msg)
-
- return {"message": "不支持的消息类型"}, 400  # unsupported type; return a plain dict so flask_restful can serialize it
-
- def handle_text_message(self, msg):
- """处理文字消息"""
- msg_data = msg["Data"]
- wxid = msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
- app_id = msg["Appid"]
- callback_to_user = msg_data["FromUserName"]["string"]
- msg_content = msg_data["Content"]["string"]
-
- # Build the prompt for the GPT request
- prompt = {"role": "user", "content": [{"type": "text", "text": msg_content}]}
- messages_to_send = self.get_messages_from_cache(hash_key, prompt)
-
- # Publish the inbound message to Kafka
- self.send_to_kafka(wxid, callback_to_user, msg_content)
-
- # Call the GPT API for a reply
- res = self.fast_gpt_api(messages_to_send, wxid)
- reply_content = self.process_reply_content(res, wxid)
-
- # Send the GPT reply back to WeChat and append it to the cached conversation
- gewe_chat.wxchat.post_text(msg["TokenId"], app_id, callback_to_user, reply_content)
- self.get_messages_from_cache(hash_key, {"role": "assistant", "content": reply_content})
-
- return jsonify({"message": "文字消息处理成功"})
-
- def handle_image_message(self, msg):
- """处理图片消息"""
- msg_data = msg["Data"]
- wxid = msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
- app_id = msg["Appid"]
- callback_to_user = msg_data["FromUserName"]["string"]
- img_url = msg_data["Content"]["string"]
-
- # Download the image from WeChat, then upload it to OSS
- wx_img_url = gewe_chat.wxchat.download_image_msg(msg["TokenId"], app_id, img_url)
- # img_url = self.upload_image_to_oss(wx_img_url)
- oss_access_key_id = "LTAI5tRTG6pLhTpKACJYoPR5"
- oss_access_key_secret = "E7dMzeeMxq4VQvLg7Tq7uKf3XWpYfN"
- oss_endpoint = "http://oss-cn-shanghai.aliyuncs.com"
- oss_bucket_name = "cow-agent"
- oss_prefix = "cow"
-
- img_url = utils.upload_oss(oss_access_key_id, oss_access_key_secret, oss_endpoint, oss_bucket_name, wx_img_url, oss_prefix)
-
- # Send an acknowledgement message to the user
- gewe_chat.wxchat.post_text(msg["TokenId"], app_id, callback_to_user, '已经上传了图片,有什么可以为您服务')
-
- # Build the dialogue message and publish it to Kafka
- wx_content_dialogue_message = [{"type": "image_url", "image_url": {"url": img_url}}]
- self.send_to_kafka(wxid, callback_to_user, wx_content_dialogue_message)
-
- return jsonify({"message": "图片消息处理成功"})
-
- def handle_voice_message(self, msg):
- """处理语音消息"""
- msg_data = msg["Data"]
- wxid = msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
- app_id = msg["Appid"]
- callback_to_user = msg_data["FromUserName"]["string"]
- msg_id = msg_data["MsgId"]
- msg_content = msg_data["Content"]["string"]
-
- # Download the voice file and transcribe it to text
- file_url = gewe_chat.wxchat.download_audio_msg(msg["TokenId"], app_id, msg_id, msg_content)
- react_silk_path = utils.save_to_local_from_url(file_url)
- react_wav_path = os.path.splitext(react_silk_path)[0] + ".wav"
- audio_convert.any_to_wav(react_silk_path, react_wav_path)
- react_voice_text = AliVoice().voiceToText(react_wav_path)
-
- # Send the transcript to GPT for a reply
- messages_to_send = self.get_messages_from_cache(hash_key, {"role": "user", "content": react_voice_text})
- ai_res = self.fast_gpt_api(messages_to_send, wxid)
- ai_res_content = ai_res["choices"][0]["message"]["content"]
-
- # Convert the GPT reply to speech, then to SILK format, and upload it to OSS
- reply_text_voice = AliVoice().textToVoice(ai_res_content)
- reply_text_voice_path = os.path.join(os.getcwd(), reply_text_voice)
- reply_silk_path = os.path.splitext(reply_text_voice_path)[0] + ".silk"
- reply_silk_during = audio_convert.any_to_sil(reply_text_voice_path, reply_silk_path)  # capture the clip duration returned by the converter for post_voice below
-
- file_url = self.upload_audio_to_oss(reply_silk_path)
-
- # Send the voice reply
- res = gewe_chat.wxchat.post_voice(msg["TokenId"], app_id, callback_to_user, file_url, int(reply_silk_during))
-
- # Clean up temporary files
- self.delete_temp_files([react_silk_path, react_wav_path, reply_text_voice_path, reply_silk_path])
-
- return jsonify({"message": "语音消息处理成功"})
-
- def handle_replied_message(self, msg):
- """处理引用消息"""
- msg_data = msg["Data"]
- wxid = msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
- app_id = msg["Appid"]
- callback_to_user = msg_data["FromUserName"]["string"]
- msg_content_xml = msg_data["Content"]["string"]
-
- # Parse the XML payload to identify a quoted message
- root = ET.fromstring(msg_content_xml)
- type_node = root.find(".//appmsg/type")
- type_value = type_node.text if type_node is not None else None
- if type_value == '57':  # type 57 marks a quoted (replied-to) message
- prompt = {"role": "user", "content": [{"type": "text", "text": msg_content_xml}]}
- messages_to_send = self.get_messages_from_cache(hash_key, prompt)
-
- # Publish to Kafka
- self.send_to_kafka(wxid, callback_to_user, msg_content_xml)
-
- # Get the GPT reply and send it back
- res = self.fast_gpt_api(messages_to_send, wxid)
- reply_content = res["choices"][0]["message"]["content"]
- gewe_chat.wxchat.post_text(msg["TokenId"], app_id, callback_to_user, reply_content)
-
- return jsonify({"message": "引用消息处理成功"})
-
- def get_messages_from_cache(self, hash_key, message: dict) -> list:
- """Append a message to the cached conversation for hash_key and write it back to Redis."""
- messages = redis_helper.redis_helper.get_hash(hash_key)
- if not messages:
- messages = [{"role": "system", "content": ""}]
- messages.append(message)
- redis_helper.redis_helper.set_hash(hash_key, {"data": json.dumps(messages, ensure_ascii=False)}, 3600)
- else:
- messages_str = redis_helper.redis_helper.get_hash_field(hash_key, "data")
- messages = json.loads(messages_str) if messages_str else []
- last_message = messages[-1]
- content = last_message.get("content", [])
- if isinstance(content, list) and content:
- last_content_type = content[-1].get("type")
- if last_content_type == 'image_url':
- # Merge the new part into the trailing multimodal message instead of appending a new turn
- content.append(message['content'][0])
- messages[-1]['content'] = content
- else:
- messages.append(message)
- else:
- messages.append(message)
-
- redis_helper.redis_helper.set_hash(hash_key, {"data": json.dumps(messages, ensure_ascii=False)}, 3600)
- return messages
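As an illustration, after one text exchange the list stored under the hash's `data` field looks roughly like this (values are made up):

```python
# Illustrative cached conversation (placeholder values only).
cached_messages = [
    {"role": "system", "content": ""},
    {"role": "user", "content": [{"type": "text", "text": "What is my order status?"}]},
    {"role": "assistant", "content": "Please send me your order number."},
]
# Persisted via set_hash(hash_key, {"data": json.dumps(cached_messages, ensure_ascii=False)}, 3600);
# the 3600 is presumably a TTL in seconds.
```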
-
- def send_to_kafka(self, wxid, callback_to_user, msg_content):
- """将消息发送到 Kafka"""
- input_wx_content_dialogue_message = [{"type": "text", "text": msg_content}]
- input_message = utils.dialogue_message(callback_to_user, wxid, input_wx_content_dialogue_message)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s", input_message)
-
- def fast_gpt_api(self, messages: list, session_id: str):
- """调用 GPT API 获取回复"""
- api_key = "sk-tdi7u0zuLsR0JpPMGBeFZxymOpL0zoFVafX8EEEvEakIDAGQ22NyQ6w"
- api_url = "http://106.15.182.218:3000/api/v1/chat/completions"
- headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
- data = {"model": "", "messages": messages, "chatId": session_id, "detail": True}
-
- try:
- response = requests.post(url=api_url, headers=headers, data=json.dumps(data), timeout=600)
- response.raise_for_status()
- return response.json()
- except requests.exceptions.RequestException as e:
- logger.error(f"GPT API 请求失败: {e}")
- return {"error": "API 请求失败"}
-
- def process_reply_content(self, res, wxid):
- """处理 GPT 回复内容"""
- reply_content = res["choices"][0]["message"]["content"]
- if isinstance(reply_content, list) and any(item.get("type") == "interactive" for item in reply_content):
- return self.process_interactive_reply(reply_content, wxid)
- elif isinstance(reply_content, list) and any(item.get("type") == "text" for item in reply_content):
- return self.process_text_reply(reply_content, wxid)
- else:
- return reply_content
-
- def process_interactive_reply(self, reply_content, wxid):
- """处理交互式回复"""
- description = ''
- user_select_options = []
- for item in reply_content:
- if item["type"] == "interactive" and item["interactive"]["type"] == "userSelect":
- params = item["interactive"]["params"]
- description = params.get("description")
- user_select_options = params.get("userSelectOptions", [])
- values_string = "\n".join(option["value"] for option in user_select_options)
-
- if description:
- memory.USER_INTERACTIVE_CACHE[wxid] = {"interactive": True}
- return f"{description}\n------------------------------\n{values_string}"
- return reply_content
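The loop above expects `userSelect` items shaped roughly as follows (trimmed to the fields it reads); the returned text places the option values under the description:

```python
# Trimmed illustration of a userSelect interactive item and the reply it produces.
item = {
    "type": "interactive",
    "interactive": {
        "type": "userSelect",
        "params": {
            "description": "Which plan would you like?",
            "userSelectOptions": [{"value": "Basic"}, {"value": "Pro"}],
        },
    },
}
# Resulting reply text:
#   Which plan would you like?
#   ------------------------------
#   Basic
#   Pro
```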
-
- def process_text_reply(self, reply_content, wxid):
- """处理文本回复"""
- memory.USER_INTERACTIVE_CACHE[wxid] = {"interactive": False}
- text = next((item["text"]["content"] for item in reply_content if item["type"] == "text"), '')
- if not text:
- messages_to_send = self.get_messages_from_cache(f"__AI_OPS_WX__:MESSAGES:{wxid}", {"role": "user", "content": text})
- res = self.fast_gpt_api(messages_to_send, wxid)
- return res["choices"][0]["message"]["content"]
- return text
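The file does not show how this resource is registered with the Flask app; a minimal wiring sketch (the route and port below are assumptions) would be:

```python
# Hypothetical wiring; the real app factory and route are not shown in this file.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
api.add_resource(MessagesResource, "/api/v1/wechat/callback")  # route is an assumption

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```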