- from flask_restful import Resource, reqparse
- from flask import jsonify, request
- import requests
- import json
- import os
- import xml.etree.ElementTree as ET
-
- from bridge.context import ContextType
- from wechat import gewe_chat
- from voice import audio_convert
- from voice.ali.ali_voice import AliVoice
- from common import utils, redis_helper, memory, kafka_helper
- from common.log import logger
-
-
- class MessagesBKResource(Resource):
- def __init__(self):
- self.parser = reqparse.RequestParser()
-
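- # Webhook entry point for GeWe WeChat callbacks: dispatches on Data.MsgType
- # (1 = text, 3 = image, 34 = voice, 49 = app message such as a quoted reply).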
- def post(self):
- msg = request.get_json()
- logger.debug(f"Received message: {msg}")
-
- if 'Data' in msg:
- msg_data = msg.get("Data")
- msg_type = msg_data.get("MsgType")
- if msg_type == 1:  # ContextType.TEXT: text message
- msg_content = msg_data["Content"]["string"]
- logger.info(msg_content)
-
- app_id=msg["Appid"]
- wxid=msg["Wxid"]
- from_wxid=msg_data["FromUserName"]["string"]
- to_wxid=msg_data["ToUserName"]["string"]
-
- token_id="f828cb3c-1039-489f-b9ae-7494d1778a15"
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
-
- if wxid == from_wxid:  # message sent proactively by the bot account itself
- logger.info("Active message sending detected")
- gewe_chat.wxchat.save_contacts_brief_to_cache(token_id,app_id,wxid,[to_wxid])
- callback_to_user=msg_data["ToUserName"]["string"]
-
- input_wx_content_dialogue_message=[{"type": "text", "text": msg_content}]
- input_message=utils.dialogue_message(from_wxid,to_wxid,input_wx_content_dialogue_message)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s",input_message)
- else:
- callback_to_user=msg_data["FromUserName"]["string"]
-
- prompt={"role": "user", "content": [{
- "type": "text",
- "text": msg_content
- }]}
- messages_to_send=get_messages_from_cache(hash_key, prompt)
- # record the incoming dialogue turn and forward it to Kafka
- input_wx_content_dialogue_message=[{"type": "text", "text": msg_content}]
- input_message=utils.dialogue_message(callback_to_user,wxid,input_wx_content_dialogue_message)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s",input_message)
-
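- # If the previous reply was an interactive userSelect card, send only the raw user
- # text so FastGPT can match it against the pending options.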
- cache_data = memory.USER_INTERACTIVE_CACHE.get(wxid)
- if cache_data and cache_data.get('interactive'):
- messages_to_send=[{"role": "user", "content": msg_content}]
-
- res=fast_gpt_api(messages_to_send,wxid)
- reply_content=res["choices"][0]["message"]["content"]
-
- description = ''
- userSelectOptions = []
- values_string = ''  # guard against an interactive item that carries no userSelect options
-
- if isinstance(reply_content, list) and any(item.get("type") == "interactive" for item in reply_content):
- for item in reply_content:
- if item["type"] == "interactive" and item["interactive"]["type"] == "userSelect":
- params = item["interactive"]["params"]
- description = params.get("description")
- userSelectOptions = params.get("userSelectOptions", [])
- values_string = "\n".join(option["value"] for option in userSelectOptions)
-
- if description is not None:
- memory.USER_INTERACTIVE_CACHE[wxid] = {
- "interactive":True
- }
- reply_content=description + '------------------------------\n'+values_string
-
- elif isinstance(reply_content, list) and any(item.get("type") == "text" for item in reply_content):
- memory.USER_INTERACTIVE_CACHE[wxid] = {
- "interactive":False
- }
- text=''
- for item in reply_content:
- if item["type"] == "text":
- text=item["text"]["content"]
- if text == '':
- # drop the previous round from the cache and resend the request
- cache_messages_str=redis_helper.redis_helper.get_hash_field(hash_key,"data")
- cache_messages = json.loads(cache_messages_str) if cache_messages_str else []
-
- if len(cache_messages) >= 3:
- cache_messages = cache_messages[:-3]
-
- redis_helper.redis_helper.update_hash_field(hash_key,"data",json.dumps(cache_messages,ensure_ascii=False))
- messages_to_send=get_messages_from_cache(hash_key, prompt)
- res=fast_gpt_api(messages_to_send,wxid)
- reply_content=res["choices"][0]["message"]["content"]
- else:
- reply_content=text
- else:
- memory.USER_INTERACTIVE_CACHE[wxid] = {
- "interactive":False
- }
- reply_content=res["choices"][0]["message"]["content"]
-
- logger.debug(f'token_id: {token_id}, app_id: {app_id}, to_user: {callback_to_user}')
- logger.debug(f'reply_content: {reply_content}')
- gewe_chat.wxchat.post_text(token_id,app_id,callback_to_user,reply_content)
- get_messages_from_cache(hash_key, {"role": "assistant", "content": reply_content})
- # record the reply and forward it to Kafka
- input_wx_content_dialogue_message=[{"type": "text", "text": reply_content}]
- input_message=utils.dialogue_message(wxid,callback_to_user,input_wx_content_dialogue_message,True)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s",input_message)
-
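- # Image message: download the picture from WeChat, re-upload it to OSS so it is
- # reachable by URL, and store it in the conversation cache as an image_url part.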
- elif msg_type == 3:  # image message
-
- token_id="f828cb3c-1039-489f-b9ae-7494d1778a15"
- app_id=msg["Appid"]
- callback_to_user=msg_data["FromUserName"]["string"]
- msg_content=msg_data["Content"]["string"]
-
- logger.debug(f'token_id: {token_id}, app_id: {app_id}, to_user: {callback_to_user}')
- wxid=msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
- wx_img_url=gewe_chat.wxchat.download_image_msg(token_id,app_id,msg_content)
-
- oss_access_key_id="LTAI5tRTG6pLhTpKACJYoPR5"
- oss_access_key_secret="E7dMzeeMxq4VQvLg7Tq7uKf3XWpYfN"
- oss_endpoint="http://oss-cn-shanghai.aliyuncs.com"
- oss_bucket_name="cow-agent"
- oss_prefix="cow"
-
- img_url=utils.upload_oss(oss_access_key_id, oss_access_key_secret, oss_endpoint, oss_bucket_name, wx_img_url, oss_prefix)
-
- prompt={
- "role": "user",
- "content": [{
- "type": "image_url",
- "image_url": {"url": img_url}
- }]
- }
-
- get_messages_from_cache(hash_key, prompt)
- gewe_chat.wxchat.post_text(token_id,app_id,callback_to_user,'已经上传了图片,有什么可以为您服务')
- logger.debug(f"Uploaded image URL: {img_url}")
-
- wx_content_dialogue_message=[{"type": "image_url", "image_url": {"url": img_url}}]
- input_message=utils.dialogue_message(wxid,callback_to_user,wx_content_dialogue_message)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s",input_message)
-
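- # Voice message: download the silk audio, convert it to wav, transcribe it with
- # AliVoice, get the LLM reply, synthesize it back to silk, upload to OSS and send.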
- elif msg_type == 34:  # voice message
- token_id="f828cb3c-1039-489f-b9ae-7494d1778a15"
- callback_to_user=msg_data["FromUserName"]["string"]
- app_id=msg["Appid"]
- msg_content=msg_data["Content"]["string"]
- msg_id=msg_data["MsgId"]
-
- wxid=msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
-
- logger.debug(f'token_id: {token_id}, app_id: {app_id}, to_user: {callback_to_user}, msg_id: {msg_id}')
-
-
- file_url=gewe_chat.wxchat.download_audio_msg(token_id,app_id,msg_id,msg_content)
- react_silk_path=utils.save_to_local_from_url(file_url)
- react_wav_path = os.path.splitext(react_silk_path)[0] + ".wav"
- audio_convert.any_to_wav(react_silk_path,react_wav_path)
- react_voice_text=AliVoice().voiceToText(react_wav_path)
-
- messages=get_messages_from_cache(hash_key, {"role": "user", "content": react_voice_text})
- ai_res=fast_gpt_api(messages,wxid)
- ai_res_content=ai_res["choices"][0]["message"]["content"]
- reply_text_voice=AliVoice().textToVoice(ai_res_content)
- reply_text_voice_path=os.path.join(os.getcwd(), reply_text_voice)
- reply_silk_path = os.path.splitext(reply_text_voice_path)[0] + ".silk"
- reply_silk_during=audio_convert.any_to_sil(reply_text_voice_path,reply_silk_path)
-
- # print(int(reply_silk_during))
- # print(reply_silk_path)
- oss_access_key_id="LTAI5tRTG6pLhTpKACJYoPR5"
- oss_access_key_secret="E7dMzeeMxq4VQvLg7Tq7uKf3XWpYfN"
- oss_endpoint="http://oss-cn-shanghai.aliyuncs.com"
- oss_bucket_name="cow-agent"
- oss_prefix="cow"
-
- file_path=reply_silk_path
- file_url = utils.upload_oss(oss_access_key_id, oss_access_key_secret, oss_endpoint, oss_bucket_name, file_path, oss_prefix)
- logger.debug(f'voice reply uploaded to: {file_url}')
- res=gewe_chat.wxchat.post_voice(token_id,app_id,callback_to_user,file_url,int(reply_silk_during))
- # remove temporary audio files
- os.remove(react_silk_path)
- os.remove(react_wav_path)
- os.remove(reply_text_voice_path)
- os.remove(reply_silk_path)
-
- get_messages_from_cache(hash_key, {"role": "assistant", "content": ai_res_content})  # cache the reply text, not the full API response
-
- elif msg_type == 49:  # app message carrying an XML payload
- msg_content_xml = msg_data["Content"]["string"]
- root = ET.fromstring(msg_content_xml)
- type_value = root.find(".//appmsg/type").text
- if type_value == "57":  # quoted / reply message (the XML value is a string, not an int)
- '''
- Detection rule: $.Data.MsgType == 49 and, in the XML parsed from
- $.Data.Content.string, msg.appmsg.type == 57.
- '''
- wxid=msg["Wxid"]
- hash_key = f"__AI_OPS_WX__:MESSAGES:{wxid}"
-
- app_id=msg["Appid"]
- callback_to_user=msg_data["FromUserName"]["string"]
- # towxuser=touser.get("")
-
- # token_id="ce50e2c376c843a9a281af3a1a0f4420"
- token_id="f828cb3c-1039-489f-b9ae-7494d1778a15"
-
- prompt={"role": "user", "content": [{
- "type": "text",
- "text": msg_content
- }]}
-
- # incoming dialogue record
- messages_to_send=get_messages_from_cache(hash_key, prompt)
- input_wx_content_dialogue_message=[{"type": "text", "text": msg_content}]
- input_message=utils.dialogue_message(callback_to_user,wxid,input_wx_content_dialogue_message)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s",input_message)
-
- # reply dialogue record
- res=fast_gpt_api(messages_to_send,wxid)
- reply_content=res["choices"][0]["message"]["content"]
- input_wx_content_dialogue_message=[{"type": "text", "text": reply_content}]
- input_message=utils.dialogue_message(wxid,callback_to_user,input_wx_content_dialogue_message,True)
- kafka_helper.kafka_client.produce_message(input_message)
- logger.info("发送对话 %s",input_message)
- get_messages_from_cache(hash_key, {"role": "assistant", "content": reply_content})
- gewe_chat.wxchat.post_text(token_id,app_id,callback_to_user,reply_content)
-
- return jsonify({"message": "微信回调成功"})
-
-
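- # Conversation cache helper: appends one message to the Redis-backed history for this
- # wxid (hash field "data", 1 hour TTL) and returns the full message list.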
- def get_messages_from_cache(hash_key, message: dict) -> list:
- messages = redis_helper.redis_helper.get_hash(hash_key)
- wxid = hash_key.split(':')[-1]
- if not messages:
- # first turn for this user: seed the history with an empty system prompt
- messages = [{"role": "system", "content": ""}]
- messages.append(message)
- redis_helper.redis_helper.set_hash(hash_key, {"data": json.dumps(messages, ensure_ascii=False)}, 3600)
- else:
- messages_str = redis_helper.redis_helper.get_hash_field(hash_key, "data")
- messages = json.loads(messages_str) if messages_str else []
- # if the cached history ends with an image_url part, merge the new list-style
- # content into that message instead of appending a separate one
- last_message = messages[-1]
- content = last_message.get("content", [])
- if isinstance(content, list) and content:
- last_content_type = content[-1].get("type")
- if last_content_type == 'image_url' and isinstance(message.get("content"), list):
- content.append(message['content'][0])
- messages[-1]['content'] = content
- else:
- messages.append(message)
- else:
- messages.append(message)
- redis_helper.redis_helper.set_hash(hash_key, {"data": json.dumps(messages, ensure_ascii=False)}, 3600)
- return messages
-
-
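- # Calls a FastGPT-compatible /api/v1/chat/completions endpoint; chatId identifies the
- # session on the FastGPT side and "detail": True is forwarded to the API unchanged.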
- def fast_gpt_api(messages:list,session_id:str):
- #api_key="sk-tdi7u0zuLsR0JpPMGBeFZxymOpL0zoFVafX8EEEvEakIDAGQ22NyQ6w"
- api_key="sk-uJDBdKmJVb2cmfldGOvlIY6Qx0AzqWMPD3lS1IzgQYzHNOXv9SKNI"
- api_url = "http://106.15.182.218:3000/api/v1/chat/completions"
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {api_key}"
- }
- data = {
- "model": "",
- "messages": messages,
- "chatId": session_id,
- "detail": True
- }
- logger.info("[CHATGPT] 请求={}".format(json.dumps(data, ensure_ascii=False)))
- response = requests.post(url=api_url, headers=headers, data=json.dumps(data), timeout=600)
- response.raise_for_status()
- response_data = response.json()
- logger.info("[CHATGPT] 响应={}".format(json.dumps(response_data, separators=(',', ':'),ensure_ascii=False)))
- return response_data
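-
-
- # Usage sketch (assumption: the real routing/registration for this resource lives
- # elsewhere in the project; the URL path below is illustrative, not taken from here):
- #
- # from flask import Flask
- # from flask_restful import Api
- #
- # app = Flask(__name__)
- # api = Api(app)
- # api.add_resource(MessagesBKResource, "/api/messages/callback")
- # app.run(host="0.0.0.0", port=5000)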