
refactor(wechatmp): use wechatpy to handle wechatmp messages

feat(wechatmp): add support for image and voice replies
master
lanvent 1 year ago
parent
commit
89dd8a1db6
7 changed files with 270 additions and 340 deletions
  1. channel/wechatmp/active_reply.py  +26 -31
  2. channel/wechatmp/common.py  +10 -18
  3. channel/wechatmp/passive_reply.py  +50 -46
  4. channel/wechatmp/wechatmp_channel.py  +89 -36
  5. channel/wechatmp/wechatmp_client.py  +26 -171
  6. channel/wechatmp/wechatmp_message.py  +53 -38
  7. voice/audio_convert.py  +16 -0

channel/wechatmp/active_reply.py  +26 -31

@@ -2,15 +2,14 @@ import time

import web

from channel.wechatmp.wechatmp_message import parse_xml
from channel.wechatmp.passive_reply_message import TextMsg
from channel.wechatmp.wechatmp_message import WeChatMPMessage
from bridge.context import *
from bridge.reply import ReplyType
from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_channel import WechatMPChannel
from wechatpy import parse_message
from common.log import logger
from config import conf
from wechatpy.replies import create_reply

# This class is instantiated once per query
class Query:
@@ -21,16 +20,22 @@ class Query:
# Make sure to return the instance that first created, @singleton will do that.
channel = WechatMPChannel()
try:
webData = web.data()
message = web.data() # todo crypto
# logger.debug("[wechatmp] Receive request:\n" + webData.decode("utf-8"))
wechatmp_msg = parse_xml(webData)
if (
wechatmp_msg.msg_type == "text"
or wechatmp_msg.msg_type == "voice"
# or wechatmp_msg.msg_type == "image"
):
msg = parse_message(message)
if msg.type == "event":
logger.info(
"[wechatmp] Event {} from {}".format(
msg.event, msg.source
)
)
reply_text = subscribe_msg()
replyPost = create_reply(reply_text, msg)
return replyPost.render()
wechatmp_msg = WeChatMPMessage(msg, client=channel.client)
if wechatmp_msg.ctype in [ContextType.TEXT, ContextType.IMAGE, ContextType.VOICE]:
from_user = wechatmp_msg.from_user_id
message = wechatmp_msg.content
content = wechatmp_msg.content
message_id = wechatmp_msg.msg_id

logger.info(
@@ -39,16 +44,18 @@ class Query:
web.ctx.env.get("REMOTE_PORT"),
from_user,
message_id,
message,
content,
)
)
if (wechatmp_msg.msg_type == "voice" and conf().get("voice_reply_voice") == True):
rtype = ReplyType.VOICE
if (msg.type == "voice" and wechatmp_msg.ctype == ContextType.TEXT):
origin_ctype = ContextType.VOICE
context = channel._compose_context(
wechatmp_msg.ctype, content, isgroup=False, origin_ctype=origin_ctype, msg=wechatmp_msg
)
else:
rtype = None
context = channel._compose_context(
ContextType.TEXT, message, isgroup=False, desire_rtype=rtype, msg=wechatmp_msg
)
context = channel._compose_context(
wechatmp_msg.ctype, content, isgroup=False, msg=wechatmp_msg
)
if context:
# set private openai_api_key
# if from_user is not changed in itchat, this can be placed at chat_channel
@@ -59,18 +66,6 @@ class Query:
channel.produce(context)
# The reply will be sent by channel.send() in another thread
return "success"

elif wechatmp_msg.msg_type == "event":
logger.info(
"[wechatmp] Event {} from {}".format(
wechatmp_msg.Event, wechatmp_msg.from_user_id
)
)
content = subscribe_msg()
replyMsg = TextMsg(
wechatmp_msg.from_user_id, wechatmp_msg.to_user_id, content
)
return replyMsg.send()
else:
logger.info("暂且不处理")
return "success"


channel/wechatmp/common.py  +10 -18

@@ -1,8 +1,10 @@
import hashlib
import textwrap
import web

from config import conf

from wechatpy.utils import check_signature
from wechatpy.crypto import WeChatCrypto
from wechatpy.exceptions import InvalidSignatureException
MAX_UTF8_LEN = 2048


@@ -12,27 +14,17 @@ class WeChatAPIException(Exception):

def verify_server(data):
try:
if len(data) == 0:
return "None"
signature = data.signature
timestamp = data.timestamp
nonce = data.nonce
echostr = data.echostr
token = conf().get("wechatmp_token") # 请按照公众平台官网\基本配置中信息填写

data_list = [token, timestamp, nonce]
data_list.sort()
sha1 = hashlib.sha1()
# map(sha1.update, data_list) #python2
sha1.update("".join(data_list).encode("utf-8"))
hashcode = sha1.hexdigest()
print("handle/GET func: hashcode, signature: ", hashcode, signature)
if hashcode == signature:
return echostr
else:
return ""
except Exception as Argument:
return Argument
check_signature(token, signature, timestamp, nonce)
return echostr
except InvalidSignatureException:
raise web.Forbidden("Invalid signature")
except Exception as e:
raise web.Forbidden(str(e))


def subscribe_msg():
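
The check_signature call that verify_server now relies on performs the same check as the removed hand-rolled code; roughly, as a sketch (returning a boolean where wechatpy instead raises InvalidSignatureException):

# Roughly what wechatpy's check_signature verifies: the SHA-1 of the sorted
# [token, timestamp, nonce] triple must match the signature query parameter.
import hashlib

def signature_is_valid(token, signature, timestamp, nonce):
    data = "".join(sorted([token, timestamp, nonce]))
    return hashlib.sha1(data.encode("utf-8")).hexdigest() == signature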


channel/wechatmp/passive_reply.py  +50 -46

@@ -3,15 +3,14 @@ import asyncio

import web

from channel.wechatmp.wechatmp_message import parse_xml
from channel.wechatmp.passive_reply_message import TextMsg, VoiceMsg, ImageMsg
from channel.wechatmp.wechatmp_message import WeChatMPMessage
from bridge.context import *
from bridge.reply import ReplyType
from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_channel import WechatMPChannel
from common.log import logger
from config import conf

from wechatpy import parse_message
from wechatpy.replies import create_reply, ImageReply, VoiceReply

# This class is instantiated once per query
class Query:
@@ -22,36 +21,49 @@ class Query:
try:
request_time = time.time()
channel = WechatMPChannel()
webData = web.data()
logger.debug("[wechatmp] Receive post data:\n" + webData.decode("utf-8"))
wechatmp_msg = parse_xml(webData)
if wechatmp_msg.msg_type == "text" or wechatmp_msg.msg_type == "voice":
message = web.data() # todo crypto
msg = parse_message(message)
logger.debug("[wechatmp] Receive post data:\n" + message.decode("utf-8"))
if msg.type == "event":
logger.info(
"[wechatmp] Event {} from {}".format(
msg.event, msg.source
)
)
reply_text = subscribe_msg()
replyPost = create_reply(reply_text, msg)
return replyPost.render()
wechatmp_msg = WeChatMPMessage(msg, client=channel.client)
if wechatmp_msg.ctype in [ContextType.TEXT, ContextType.IMAGE, ContextType.VOICE]:
from_user = wechatmp_msg.from_user_id
to_user = wechatmp_msg.to_user_id
message = wechatmp_msg.content
content = wechatmp_msg.content
message_id = wechatmp_msg.msg_id

supported = True
if "【收到不支持的消息类型,暂无法显示】" in message:
if "【收到不支持的消息类型,暂无法显示】" in content:
supported = False # not supported, used to refresh

# New request
if (
from_user not in channel.cache_dict
and from_user not in channel.running
or message.startswith("#")
or content.startswith("#")
and message_id not in channel.request_cnt # insert the godcmd
):
# The first query begin
if (wechatmp_msg.msg_type == "voice" and conf().get("voice_reply_voice") == True):
rtype = ReplyType.VOICE
if (msg.type == "voice" and wechatmp_msg.ctype == ContextType.TEXT):
origin_ctype = ContextType.VOICE
context = channel._compose_context(
wechatmp_msg.ctype, content, isgroup=False, origin_ctype=origin_ctype, msg=wechatmp_msg
)
else:
rtype = None
context = channel._compose_context(
ContextType.TEXT, message, isgroup=False, desire_rtype=rtype, msg=wechatmp_msg
)
context = channel._compose_context(
wechatmp_msg.ctype, content, isgroup=False, msg=wechatmp_msg
)
logger.debug(
"[wechatmp] context: {} {}".format(context, wechatmp_msg)
"[wechatmp] context: {} {} {}".format(context, wechatmp_msg, supported)
)

if supported and context:
@@ -65,26 +77,27 @@ class Query:
trigger_prefix = conf().get("single_chat_prefix", [""])
if trigger_prefix or not supported:
if trigger_prefix:
content = textwrap.dedent(
reply_text = textwrap.dedent(
f"""\
请输入'{trigger_prefix}'接你想说的话跟我说话。
例如:
{trigger_prefix}你好,很高兴见到你。"""
)
else:
content = textwrap.dedent(
reply_text = textwrap.dedent(
"""\
你好,很高兴见到你。
请跟我说话吧。"""
)
else:
logger.error(f"[wechatmp] unknown error")
content = textwrap.dedent(
reply_text = textwrap.dedent(
"""\
未知错误,请稍后再试"""
)
replyPost = TextMsg(wechatmp_msg.from_user_id, wechatmp_msg.to_user_id, content).send()
return replyPost
replyPost = create_reply(reply_text, msg)
return replyPost.render()


# Wechat official server will request 3 times (5 seconds each), with the same message_id.
@@ -96,7 +109,7 @@ class Query:
request_cnt,
from_user,
message_id,
message,
content,
web.ctx.env.get("REMOTE_ADDR"),
web.ctx.env.get("REMOTE_PORT"),
)
@@ -121,8 +134,8 @@ class Query:
else: # request_cnt == 3:
# return timeout message
reply_text = "【正在思考中,回复任意文字尝试获取回复】"
replyPost = TextMsg(from_user, to_user, reply_text).send()
return replyPost
replyPost = create_reply(reply_text, msg)
return replyPost.render()

# reply is ready
channel.request_cnt.pop(message_id)
@@ -158,36 +171,27 @@ class Query:
request_cnt,
from_user,
message_id,
message,
content,
reply_text,
)
)
replyPost = TextMsg(from_user, to_user, reply_text).send()
return replyPost
replyPost = create_reply(reply_text, msg)
return replyPost.render()


elif (reply_type == "voice"):
media_id = content
asyncio.run_coroutine_threadsafe(channel.delete_media(media_id), channel.delete_media_loop)
replyPost = VoiceMsg(from_user, to_user, media_id).send()
return replyPost
replyPost = VoiceReply(message=msg)
replyPost.media_id = media_id
return replyPost.render()

elif (reply_type == "image"):
media_id = content
asyncio.run_coroutine_threadsafe(channel.delete_media(media_id), channel.delete_media_loop)
replyPost = ImageMsg(from_user, to_user, media_id).send()
return replyPost

elif wechatmp_msg.msg_type == "event":
logger.info(
"[wechatmp] Event {} from {}".format(
wechatmp_msg.content, wechatmp_msg.from_user_id
)
)
content = subscribe_msg()
replyMsg = TextMsg(
wechatmp_msg.from_user_id, wechatmp_msg.to_user_id, content
)
return replyMsg.send()
replyPost = ImageReply(message=msg)
replyPost.media_id = media_id
return replyPost.render()
else:
logger.info("暂且不处理")
return "success"


channel/wechatmp/wechatmp_channel.py  +89 -36

@@ -13,10 +13,13 @@ from common.log import logger
from common.singleton import singleton
from config import conf

from wechatpy.exceptions import WeChatClientException
import asyncio
from threading import Thread

import web

from voice.audio_convert import any_to_mp3
# If using SSL, uncomment the following lines, and modify the certificate path.
# from cheroot.server import HTTPServer
# from cheroot.ssl.builtin import BuiltinSSLAdapter
@@ -31,7 +34,9 @@ class WechatMPChannel(ChatChannel):
super().__init__()
self.passive_reply = passive_reply
self.NOT_SUPPORT_REPLYTYPE = []
self.client = WechatMPClient()
appid = conf().get("wechatmp_app_id")
secret = conf().get("wechatmp_app_secret")
self.client = WechatMPClient(appid, secret)
if self.passive_reply:
# Cache the reply to the user's first message
self.cache_dict = dict()
@@ -62,7 +67,7 @@ class WechatMPChannel(ChatChannel):
async def delete_media(self, media_id):
logger.debug("[wechatmp] permanent media {} will be deleted in 10s".format(media_id))
await asyncio.sleep(10)
self.client.delete_permanent_media(media_id)
self.client.material.delete(media_id)
logger.info("[wechatmp] permanent media {} has been deleted".format(media_id))

def send(self, reply: Reply, context: Context):
@@ -73,17 +78,17 @@ class WechatMPChannel(ChatChannel):
logger.info("[wechatmp] reply to {} cached:\n{}".format(receiver, reply_text))
self.cache_dict[receiver] = ("text", reply_text)
elif reply.type == ReplyType.VOICE:
voice_file_path = reply.content
logger.info("[wechatmp] voice file path {}".format(voice_file_path))
with open(voice_file_path, 'rb') as f:
filename = receiver + "-" + context["msg"].msg_id + ".mp3"
media_id = self.client.upload_permanent_media("voice", (filename, f, "audio/mpeg"))
# 根据文件大小估计一个微信自动审核的时间,审核结束前返回将会导致语音无法播放,这个估计有待验证
f_size = os.fstat(f.fileno()).st_size
print(f_size)
time.sleep(1.0 + 2 * f_size / 1024 / 1024)
logger.info("[wechatmp] voice reply to {} uploaded: {}".format(receiver, media_id))
self.cache_dict[receiver] = ("voice", media_id)
try:
file_path = reply.content
response = self.client.material.add("voice", open(file_path, "rb"))
logger.debug("[wechatmp] upload voice response: {}".format(response))
except WeChatClientException as e:
logger.error("[wechatmp] upload voice failed: {}".format(e))
return
time.sleep(3) # todo check media_id
media_id = response["media_id"]
logger.info("[wechatmp] voice reply to {} uploaded: {}".format(receiver, media_id))
self.cache_dict[receiver] = ("voice", media_id)
elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片
img_url = reply.content
pic_res = requests.get(img_url, stream=True)
@@ -93,33 +98,64 @@ class WechatMPChannel(ChatChannel):
image_storage.write(block)
image_storage.seek(0)
image_type = imghdr.what(image_storage)
filename = receiver + "-" + context["msg"].msg_id + "." + image_type
filename = receiver + "-" + str(context['msg'].msg_id) + "." + image_type
content_type = "image/" + image_type
media_id = self.client.upload_permanent_media("image", (filename, image_storage, content_type))
logger.info("[wechatmp] image reply to {} uploaded: {}".format(receiver, media_id))
try:
response = self.client.material.add("image", (filename, image_storage, content_type))
logger.debug("[wechatmp] upload image response: {}".format(response))
except WeChatClientException as e:
logger.error("[wechatmp] upload image failed: {}".format(e))
return
media_id = response["media_id"]
logger.info(
"[wechatmp] image reply url={}, receiver={}".format(img_url, receiver)
)
self.cache_dict[receiver] = ("image", media_id)
elif reply.type == ReplyType.IMAGE: # 从文件读取图片
image_storage = reply.content
image_storage.seek(0)
image_type = imghdr.what(image_storage)
filename = receiver + "-" + context["msg"].msg_id + "." + image_type
filename = receiver + "-" + str(context['msg'].msg_id) + "." + image_type
content_type = "image/" + image_type
media_id = self.client.upload_permanent_media("image", (filename, image_storage, content_type))
logger.info("[wechatmp] image reply to {} uploaded: {}".format(receiver, media_id))
try:
response = self.client.material.add("image", (filename, image_storage, content_type))
logger.debug("[wechatmp] upload image response: {}".format(response))
except WeChatClientException as e:
logger.error("[wechatmp] upload image failed: {}".format(e))
return
media_id = response["media_id"]
logger.info(
"[wechatmp] image reply url={}, receiver={}".format(img_url, receiver)
)
self.cache_dict[receiver] = ("image", media_id)
else:
if reply.type == ReplyType.TEXT or reply.type == ReplyType.INFO or reply.type == ReplyType.ERROR:
reply_text = reply.content
self.client.send_text(receiver, reply_text)
self.client.message.send_text(receiver, reply_text)
logger.info("[wechatmp] Do send to {}: {}".format(receiver, reply_text))
elif reply.type == ReplyType.VOICE:
voice_file_path = reply.content
logger.info("[wechatmp] voice file path {}".format(voice_file_path))
with open(voice_file_path, 'rb') as f:
filename = receiver + "-" + context["msg"].msg_id + ".mp3"
media_id = self.client.upload_media("voice", (filename, f, "audio/mpeg"))
self.client.send_voice(receiver, media_id)
logger.info("[wechatmp] Do send voice to {}".format(receiver))
try:
file_path = reply.content
file_name = os.path.basename(file_path)
file_type = os.path.splitext(file_name)[1]
if file_type == ".mp3":
file_type = "audio/mpeg"
elif file_type == ".amr":
file_type = "audio/amr"
else:
mp3_file = os.path.splitext(file_path)[0] + ".mp3"
any_to_mp3(file_path, mp3_file)
file_path = mp3_file
file_name = os.path.basename(file_path)
file_type = "audio/mpeg"
logger.info("[wechatmp] file_name: {}, file_type: {} ".format(file_name, file_type))
response = self.client.media.upload("voice", (file_name, open(file_path, "rb"), file_type))
logger.debug("[wechatmp] upload voice response: {}".format(response))
except WeChatClientException as e:
logger.error("[wechatmp] upload voice failed: {}".format(e))
return
self.client.message.send_voice(receiver, response["media_id"])
logger.info("[wechatmp] Do send voice to {}".format(receiver))
elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片
img_url = reply.content
pic_res = requests.get(img_url, stream=True)
@@ -129,21 +165,38 @@ class WechatMPChannel(ChatChannel):
image_storage.write(block)
image_storage.seek(0)
image_type = imghdr.what(image_storage)
filename = receiver + "-" + context["msg"].msg_id + "." + image_type
filename = receiver + "-" + str(context['msg'].msg_id) + "." + image_type
content_type = "image/" + image_type
# content_type = pic_res.headers.get('content-type')
media_id = self.client.upload_media("image", (filename, image_storage, content_type))
self.client.send_image(receiver, media_id)
logger.info("[wechatmp] sendImage url={}, receiver={}".format(img_url, receiver))
try:
response = self.client.media.upload("image", (filename, image_storage, content_type))
logger.debug("[wechatmp] upload image response: {}".format(response))
except WeChatClientException as e:
logger.error("[wechatmp] upload image failed: {}".format(e))
return
self.client.message.send_image(
receiver, response["media_id"]
)
logger.info(
"[wechatmp] sendImage url={}, receiver={}".format(img_url, receiver)
)
elif reply.type == ReplyType.IMAGE: # 从文件读取图片
image_storage = reply.content
image_storage.seek(0)
image_type = imghdr.what(image_storage)
filename = receiver + "-" + context["msg"].msg_id + "." + image_type
filename = receiver + "-" + str(context['msg'].msg_id) + "." + image_type
content_type = "image/" + image_type
media_id = self.client.upload_media("image", (filename, image_storage, content_type))
self.client.send_image(receiver, media_id)
logger.info("[wechatmp] sendImage, receiver={}".format(receiver))
try:
response = self.client.media.upload("image", (filename, image_storage, content_type))
logger.debug("[wechatmp] upload image response: {}".format(response))
except WeChatClientException as e:
logger.error("[wechatmp] upload image failed: {}".format(e))
return
self.client.message.send_image(
receiver, response["media_id"]
)
logger.info(
"[wechatmp] sendImage, receiver={}".format(receiver)
)
return

def _success_callback(self, session_id, context, **kwargs): # 线程异常结束时的回调函数
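
In the non-passive branch, sending now goes through wechatpy's client: temporary media is uploaded with client.media.upload and pushed via the customer-service message API. A condensed sketch (receiver and path are illustrative; error handling mirrors the channel code above):

# Active push over the customer-service API after uploading temporary media.
from wechatpy.client import WeChatClient
from wechatpy.exceptions import WeChatClientException
from common.log import logger

def push_voice(client: WeChatClient, receiver: str, mp3_path: str):
    try:
        with open(mp3_path, "rb") as f:
            response = client.media.upload("voice", (mp3_path, f, "audio/mpeg"))
    except WeChatClientException as e:
        logger.error("[wechatmp] upload voice failed: {}".format(e))
        return
    client.message.send_voice(receiver, response["media_id"])

# Plain text needs no upload:
# client.message.send_text(receiver, "hello")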


channel/wechatmp/wechatmp_client.py  +26 -171

@@ -3,178 +3,33 @@ import json
import requests
import threading
from channel.wechatmp.common import *
from wechatpy.client import WeChatClient
from common.log import logger
from config import conf


class WechatMPClient:
def __init__(self):
self.app_id = conf().get("wechatmp_app_id")
self.app_secret = conf().get("wechatmp_app_secret")
self.access_token = None
self.access_token_expires_time = 0
self.access_token_lock = threading.Lock()
self.get_access_token()


def wechatmp_request(self, method, url, **kwargs):
r = requests.request(method=method, url=url, **kwargs)
r.raise_for_status()
r.encoding = "utf-8"
ret = r.json()
if "errcode" in ret and ret["errcode"] != 0:
if ret["errcode"] == 45009:
self.clear_quota_v2()
raise WeChatAPIException("{}".format(ret))
return ret

def get_access_token(self):
# return the access_token
if self.access_token:
if self.access_token_expires_time - time.time() > 60:
return self.access_token

# Get new access_token
# Do not request access_token in parallel! Only the last obtained is valid.
if self.access_token_lock.acquire(blocking=False):
# Wait for other threads that have previously obtained access_token to complete the request
# This happens every 2 hours, so it doesn't affect the experience very much
time.sleep(1)
self.access_token = None
url = "https://api.weixin.qq.com/cgi-bin/token"
params = {
"grant_type": "client_credential",
"appid": self.app_id,
"secret": self.app_secret,
}
ret = self.wechatmp_request(method="get", url=url, params=params)
self.access_token = ret["access_token"]
self.access_token_expires_time = int(time.time()) + ret["expires_in"]
logger.info("[wechatmp] access_token: {}".format(self.access_token))
self.access_token_lock.release()
else:
# Wait for token update
while self.access_token_lock.locked():
time.sleep(0.1)
return self.access_token


def send_text(self, receiver, reply_text):
url = "https://api.weixin.qq.com/cgi-bin/message/custom/send"
params = {"access_token": self.get_access_token()}
json_data = {
"touser": receiver,
"msgtype": "text",
"text": {"content": reply_text},
}
self.wechatmp_request(
method="post",
url=url,
params=params,
data=json.dumps(json_data, ensure_ascii=False).encode("utf8"),
)


def send_voice(self, receiver, media_id):
url="https://api.weixin.qq.com/cgi-bin/message/custom/send"
params = {"access_token": self.get_access_token()}
json_data = {
"touser": receiver,
"msgtype": "voice",
"voice": {
"media_id": media_id
}
}
self.wechatmp_request(
method="post",
url=url,
params=params,
data=json.dumps(json_data, ensure_ascii=False).encode("utf8"),
)

def send_image(self, receiver, media_id):
url="https://api.weixin.qq.com/cgi-bin/message/custom/send"
params = {"access_token": self.get_access_token()}
json_data = {
"touser": receiver,
"msgtype": "image",
"image": {
"media_id": media_id
}
}
self.wechatmp_request(
method="post",
url=url,
params=params,
data=json.dumps(json_data, ensure_ascii=False).encode("utf8"),
)


def upload_media(self, media_type, media_file):
url="https://api.weixin.qq.com/cgi-bin/media/upload"
params={
"access_token": self.get_access_token(),
"type": media_type
}
files={"media": media_file}
ret = self.wechatmp_request(
method="post",
url=url,
params=params,
files=files
)
logger.debug("[wechatmp] media {} uploaded".format(media_file))
return ret["media_id"]


def upload_permanent_media(self, media_type, media_file):
url="https://api.weixin.qq.com/cgi-bin/material/add_material"
params={
"access_token": self.get_access_token(),
"type": media_type
}
files={"media": media_file}
ret = self.wechatmp_request(
method="post",
url=url,
params=params,
files=files
)
logger.debug("[wechatmp] permanent media {} uploaded".format(media_file))
return ret["media_id"]


def delete_permanent_media(self, media_id):
url="https://api.weixin.qq.com/cgi-bin/material/del_material"
params={
"access_token": self.get_access_token()
}
self.wechatmp_request(
method="post",
url=url,
params=params,
data=json.dumps({"media_id": media_id}, ensure_ascii=False).encode("utf8")
)
logger.debug("[wechatmp] permanent media {} deleted".format(media_id))

def clear_quota(self):
url="https://api.weixin.qq.com/cgi-bin/clear_quota"
params = {
"access_token": self.get_access_token()
}
self.wechatmp_request(
method="post",
url=url,
params=params,
data={"appid": self.app_id}
)
logger.debug("[wechatmp] API quata has been cleard")

def clear_quota_v2(self):
url="https://api.weixin.qq.com/cgi-bin/clear_quota/v2"
self.wechatmp_request(
method="post",
url=url,
data={"appid": self.app_id, "appsecret": self.app_secret}
)
logger.debug("[wechatmp] API quata has been cleard")
class WechatMPClient(WeChatClient):
def __init__(self, appid, secret, access_token=None,
session=None, timeout=None, auto_retry=True):
super(WechatMPClient, self).__init__(
appid, secret, access_token, session, timeout, auto_retry
)
self.fetch_access_token_lock = threading.Lock()

def fetch_access_token(self):
"""
获取 access token
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档

:return: 返回的 JSON 数据包
"""
with self.fetch_access_token_lock:
access_token = self.session.get(self.access_token_key)
if access_token:
if not self.expires_at:
return access_token
timestamp = time.time()
if self.expires_at - timestamp > 60:
return access_token
return super().fetch_access_token()
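
Only token refresh is overridden; everything else is stock wechatpy. A short usage sketch of how wechatmp_channel.py now constructs the client (credentials come from the project config):

# The channel builds the client from config; any API call fetches and caches the
# access token on demand, and concurrent refreshes are serialized by the lock above.
from config import conf
from channel.wechatmp.wechatmp_client import WechatMPClient

client = WechatMPClient(conf().get("wechatmp_app_id"), conf().get("wechatmp_app_secret"))
# e.g. client.message.send_text(open_id, "hello")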

channel/wechatmp/wechatmp_message.py  +53 -38

@@ -1,50 +1,65 @@
# -*- coding: utf-8 -*-#
# filename: receive.py
import xml.etree.ElementTree as ET

from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from common.tmp_dir import TmpDir


def parse_xml(web_data):
if len(web_data) == 0:
return None
xmlData = ET.fromstring(web_data)
return WeChatMPMessage(xmlData)


class WeChatMPMessage(ChatMessage):
def __init__(self, xmlData):
super().__init__(xmlData)
self.to_user_id = xmlData.find("ToUserName").text
self.from_user_id = xmlData.find("FromUserName").text
self.create_time = xmlData.find("CreateTime").text
self.msg_type = xmlData.find("MsgType").text
try:
self.msg_id = xmlData.find("MsgId").text
except:
self.msg_id = self.from_user_id + self.create_time
def __init__(self, msg, client=None):
super().__init__(msg)
self.msg_id = msg.id
self.create_time = msg.time
self.is_group = False

# reply to other_user_id
self.other_user_id = self.from_user_id

if self.msg_type == "text":
if msg.type == "text":
self.ctype = ContextType.TEXT
self.content = xmlData.find("Content").text
elif self.msg_type == "voice":
self.ctype = ContextType.TEXT
self.content = xmlData.find("Recognition").text # 接收语音识别结果
# other voice_to_text method not implemented yet
if self.content == None:
self.content = "你好"
elif self.msg_type == "image":
# not implemented yet
self.pic_url = xmlData.find("PicUrl").text
self.media_id = xmlData.find("MediaId").text
elif self.msg_type == "event":
self.content = xmlData.find("Event").text
else: # video, shortvideo, location, link
# not implemented
pass
self.content = msg.content
elif msg.type == "voice":
if msg.recognition == None:
self.ctype = ContextType.VOICE
self.content = (
TmpDir().path() + msg.media_id + "." + msg.format
) # content直接存临时目录路径

def download_voice():
# 如果响应状态码是200,则将响应内容写入本地文件
response = client.media.download(msg.media_id)
if response.status_code == 200:
with open(self.content, "wb") as f:
f.write(response.content)
else:
logger.info(
f"[wechatmp] Failed to download voice file, {response.content}"
)

self._prepare_fn = download_voice
else:
self.ctype = ContextType.TEXT
self.content = msg.recognition
elif msg.type == "image":
self.ctype = ContextType.IMAGE
self.content = TmpDir().path() + msg.media_id + ".png" # content直接存临时目录路径
def download_image():
# 如果响应状态码是200,则将响应内容写入本地文件
response = client.media.download(msg.media_id)
if response.status_code == 200:
with open(self.content, "wb") as f:
f.write(response.content)
else:
logger.info(
f"[wechatmp] Failed to download image file, {response.content}"
)

self._prepare_fn = download_image
else:
raise NotImplementedError(
"Unsupported message type: Type:{} ".format(msg.type)
)

self.from_user_id = msg.source
self.to_user_id = msg.target
self.other_user_id = msg.source
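
The deferred-download pattern used above, shown in isolation: the message records only a target path under the temp dir, and _prepare_fn fetches the media when the file is actually needed. The helper name below is illustrative; the wechatpy call is the one from the diff:

# Lazy download of an incoming attachment; client.media.download returns a
# requests.Response whose body is the raw file.
from common.log import logger
from common.tmp_dir import TmpDir

def make_downloader(client, media_id, suffix):
    target_path = TmpDir().path() + media_id + suffix
    def download():
        response = client.media.download(media_id)
        if response.status_code == 200:
            with open(target_path, "wb") as f:
                f.write(response.content)
        else:
            logger.info("[wechatmp] Failed to download media, {}".format(response.content))
    return target_path, download

# content, prepare_fn = make_downloader(channel.client, msg.media_id, ".png")
# prepare_fn()   # called right before the image/voice file is consumed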

voice/audio_convert.py  +16 -0

@@ -33,6 +33,22 @@ def get_pcm_from_wav(wav_path):
wav = wave.open(wav_path, "rb")
return wav.readframes(wav.getnframes())

def any_to_mp3(any_path, mp3_path):
"""
把任意格式转成mp3文件
"""
if any_path.endswith(".mp3"):
shutil.copy2(any_path, mp3_path)
return
if (
any_path.endswith(".sil")
or any_path.endswith(".silk")
or any_path.endswith(".slk")
):
sil_to_wav(any_path, any_path)
any_path = mp3_path
audio = AudioSegment.from_file(any_path)
audio.export(mp3_path, format="mp3")

def any_to_wav(any_path, wav_path):
"""

