@@ -1,6 +1,8 @@ | |||||
.DS_Store | .DS_Store | ||||
.idea | .idea | ||||
.vscode | .vscode | ||||
.venv | |||||
.vs | |||||
.wechaty/ | .wechaty/ | ||||
__pycache__/ | __pycache__/ | ||||
venv* | venv* | ||||
@@ -22,6 +24,8 @@ plugins/**/ | |||||
!plugins/tool | !plugins/tool | ||||
!plugins/banwords | !plugins/banwords | ||||
!plugins/banwords/**/ | !plugins/banwords/**/ | ||||
plugins/banwords/__pycache__ | |||||
plugins/banwords/lib/__pycache__ | |||||
!plugins/hello | !plugins/hello | ||||
!plugins/role | !plugins/role | ||||
!plugins/keyword | !plugins/keyword |
@@ -5,11 +5,12 @@ | |||||
最新版本支持的功能如下: | 最新版本支持的功能如下: | ||||
- [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信,微信公众号和企业微信应用等部署方式 | - [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信,微信公众号和企业微信应用等部署方式 | ||||
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3,GPT-3.5,GPT-4模型 | |||||
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3, GPT-3.5, GPT-4, 文心一言模型 | |||||
- [x] **语音识别:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai等多种语音模型 | - [x] **语音识别:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai等多种语音模型 | ||||
- [x] **图片生成:** 支持图片生成 和 图生图(如照片修复),可选择 DALL-E, stable diffusion, replicate模型 | |||||
- [x] **图片生成:** 支持图片生成 和 图生图(如照片修复),可选择 DALL-E, stable diffusion, replicate, midjourney模型 | |||||
- [x] **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结等插件 | - [x] **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结等插件 | ||||
- [X] **Tool工具:** 与操作系统和互联网交互,支持最新信息搜索、数学计算、天气和资讯查询、网页总结,基于 [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub) 实现 | - [X] **Tool工具:** 与操作系统和互联网交互,支持最新信息搜索、数学计算、天气和资讯查询、网页总结,基于 [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub) 实现 | ||||
- [x] **知识库:** 通过上传知识库文件自定义专属机器人,可作为数字分身、领域知识库、智能客服使用,基于 [LinkAI](https://chat.link-ai.tech/console) 实现 | |||||
> 欢迎接入更多应用,参考 [Terminal代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/terminal/terminal_channel.py)实现接收和发送消息逻辑即可接入。 同时欢迎增加新的插件,参考 [插件说明文档](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)。 | > 欢迎接入更多应用,参考 [Terminal代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/terminal/terminal_channel.py)实现接收和发送消息逻辑即可接入。 同时欢迎增加新的插件,参考 [插件说明文档](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)。 | ||||
@@ -27,7 +28,9 @@ Demo made by [Visionn](https://www.wangpc.cc/) | |||||
# 更新日志 | # 更新日志 | ||||
>**2023.06.12:** 接入 [LinkAI](https://chat.link-ai.tech/console) 平台,可在线创建 个人知识库,并接入微信、公众号及企业微信中。使用参考 [接入文档](https://link-ai.tech/platform/link-app/wechat)。 | |||||
>**2023.08.08:** 接入百度文心一言模型,通过 [插件](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/linkai) 支持 Midjourney 绘图 | |||||
>**2023.06.12:** 接入 [LinkAI](https://chat.link-ai.tech/console) 平台,可在线创建个人知识库,并接入微信、公众号及企业微信中,打造专属客服机器人。使用参考 [接入文档](https://link-ai.tech/platform/link-app/wechat)。 | |||||
>**2023.04.26:** 支持企业微信应用号部署,兼容插件,并支持语音图片交互,私人助理理想选择,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md)。(contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944)) | >**2023.04.26:** 支持企业微信应用号部署,兼容插件,并支持语音图片交互,私人助理理想选择,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md)。(contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944)) | ||||
@@ -151,7 +154,7 @@ pip3 install azure-cognitiveservices-speech | |||||
**4.其他配置** | **4.其他配置** | ||||
+ `model`: 模型名称,目前支持 `gpt-3.5-turbo`, `text-davinci-003`, `gpt-4`, `gpt-4-32k` (其中gpt-4 api暂未完全开放,申请通过后可使用) | |||||
+ `model`: 模型名称,目前支持 `gpt-3.5-turbo`, `text-davinci-003`, `gpt-4`, `gpt-4-32k`, `wenxin` (其中gpt-4 api暂未完全开放,申请通过后可使用) | |||||
+ `temperature`,`frequency_penalty`,`presence_penalty`: Chat API接口参数,详情参考[OpenAI官方文档。](https://platform.openai.com/docs/api-reference/chat) | + `temperature`,`frequency_penalty`,`presence_penalty`: Chat API接口参数,详情参考[OpenAI官方文档。](https://platform.openai.com/docs/api-reference/chat) | ||||
+ `proxy`:由于目前 `openai` 接口国内无法访问,需配置代理客户端的地址,详情参考 [#351](https://github.com/zhayujie/chatgpt-on-wechat/issues/351) | + `proxy`:由于目前 `openai` 接口国内无法访问,需配置代理客户端的地址,详情参考 [#351](https://github.com/zhayujie/chatgpt-on-wechat/issues/351) | ||||
+ 对于图像生成,在满足个人或群组触发条件外,还需要额外的关键词前缀来触发,对应配置 `image_create_prefix ` | + 对于图像生成,在满足个人或群组触发条件外,还需要额外的关键词前缀来触发,对应配置 `image_create_prefix ` | ||||
@@ -0,0 +1,104 @@ | |||||
# encoding:utf-8 | |||||
import requests, json | |||||
from bot.bot import Bot | |||||
from bridge.reply import Reply, ReplyType | |||||
from bot.session_manager import SessionManager | |||||
from bridge.context import ContextType | |||||
from bridge.reply import Reply, ReplyType | |||||
from common.log import logger | |||||
from config import conf | |||||
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession | |||||
BAIDU_API_KEY = conf().get("baidu_wenxin_api_key") | |||||
BAIDU_SECRET_KEY = conf().get("baidu_wenxin_secret_key") | |||||
class BaiduWenxinBot(Bot):
    """Bot backed by Baidu Wenxin (ERNIE) chat-completion API.

    Authenticates with the AK/SK pair configured as ``baidu_wenxin_api_key`` /
    ``baidu_wenxin_secret_key`` and keeps one :class:`BaiduWenxinSession` per
    conversation through :class:`SessionManager`.
    """

    def __init__(self):
        super().__init__()
        # Default to the ERNIE-Bot-turbo endpoint ("eb-instant") when no model is configured.
        self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("baidu_wenxin_model") or "eb-instant")

    def reply(self, query, context=None):
        """Return a Reply for *query*.

        Handles TEXT contexts (chat, plus the in-band memory-clearing commands)
        and IMAGE_CREATE contexts; returns None for any other context type.
        """
        if context and context.type:
            if context.type == ContextType.TEXT:
                logger.info("[BAIDU] query={}".format(query))
                session_id = context["session_id"]
                reply = None
                if query == "#清除记忆":
                    self.sessions.clear_session(session_id)
                    reply = Reply(ReplyType.INFO, "记忆已清除")
                elif query == "#清除所有":
                    self.sessions.clear_all_session()
                    reply = Reply(ReplyType.INFO, "所有人记忆已清除")
                else:
                    session = self.sessions.session_query(query, session_id)
                    result = self.reply_text(session)
                    total_tokens, completion_tokens, reply_content = (
                        result["total_tokens"],
                        result["completion_tokens"],
                        result["content"],
                    )
                    logger.debug(
                        "[BAIDU] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                            session.messages, session_id, reply_content, completion_tokens
                        )
                    )
                    # total_tokens == 0 marks a failed request; reply_content then carries the error text.
                    if total_tokens == 0:
                        reply = Reply(ReplyType.ERROR, reply_content)
                    else:
                        self.sessions.session_reply(reply_content, session_id, total_tokens)
                        reply = Reply(ReplyType.TEXT, reply_content)
                return reply
            elif context.type == ContextType.IMAGE_CREATE:
                # NOTE(review): create_img is not defined in this class or in the visible
                # code — presumably inherited from a mixin; confirm before relying on it.
                ok, retstring = self.create_img(query, 0)
                reply = None
                if ok:
                    reply = Reply(ReplyType.IMAGE_URL, retstring)
                else:
                    reply = Reply(ReplyType.ERROR, retstring)
                return reply

    def reply_text(self, session: BaiduWenxinSession, retry_count=0):
        """Call the Wenxin chat API for *session* and return a result dict.

        :param session: session holding the message history to send.
        :param retry_count: kept for interface compatibility; retries are not performed.
        :return: dict with keys ``total_tokens``, ``completion_tokens`` and
                 ``content``; ``total_tokens`` is 0 on failure and ``content``
                 then holds a human-readable error message.
        """
        try:
            logger.info("[BAIDU] model={}".format(session.model))
            access_token = self.get_access_token()
            if not access_token:
                logger.warn("[BAIDU] access token 获取失败")
                # Bug fix: previously returned content=0 (an int), which ended up
                # as the body of an ERROR reply; return a readable message instead.
                return {
                    "total_tokens": 0,
                    "completion_tokens": 0,
                    "content": "access token 获取失败",
                }
            url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/" + session.model + "?access_token=" + access_token
            headers = {"Content-Type": "application/json"}
            payload = {"messages": session.messages}
            response = requests.request("POST", url, headers=headers, data=json.dumps(payload))
            response_text = json.loads(response.text)
            res_content = response_text["result"]
            total_tokens = response_text["usage"]["total_tokens"]
            completion_tokens = response_text["usage"]["completion_tokens"]
            logger.info("[BAIDU] reply={}".format(res_content))
            return {
                "total_tokens": total_tokens,
                "completion_tokens": completion_tokens,
                "content": res_content,
            }
        except Exception as e:
            logger.warn("[BAIDU] Exception: {}".format(e))
            # Drop the (possibly oversized/broken) history so the next query starts clean.
            self.sessions.clear_session(session.session_id)
            # Bug fix: the error dict previously lacked "total_tokens", which made
            # reply() crash with KeyError on any API failure.
            return {"total_tokens": 0, "completion_tokens": 0, "content": "出错了: {}".format(e)}

    def get_access_token(self):
        """Exchange the configured AK/SK for an OAuth access token.

        :return: the access token string, or None on failure.
        """
        url = "https://aip.baidubce.com/oauth/2.0/token"
        params = {"grant_type": "client_credentials", "client_id": BAIDU_API_KEY, "client_secret": BAIDU_SECRET_KEY}
        # Bug fix: was str(...), which turned a missing token into the literal
        # string 'None' and forced a fragile `== 'None'` comparison upstream.
        return requests.post(url, params=params).json().get("access_token")
@@ -0,0 +1,87 @@ | |||||
from bot.session_manager import Session | |||||
from common.log import logger | |||||
""" | |||||
e.g. [ | |||||
{"role": "user", "content": "Who won the world series in 2020?"}, | |||||
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."}, | |||||
{"role": "user", "content": "Where was it played?"} | |||||
] | |||||
""" | |||||
class BaiduWenxinSession(Session):
    """Conversation session for the Baidu Wenxin bot.

    Holds the rolling message history and trims it when the token budget is
    exceeded. Token counting reuses the OpenAI tiktoken heuristics, so the
    default counting model is "gpt-3.5-turbo" even though requests go to Baidu.
    """

    def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # Baidu Wenxin does not support a system prompt, so no reset() here.

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop oldest messages until the history fits within *max_tokens*.

        Returns the (possibly estimated) token count after trimming. When
        precise counting fails, falls back to the caller-supplied estimate.
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Keep the first and last messages; evict the oldest in between.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                cur_tokens = self.calc_tokens() if precise else cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                # A single user message alone is over budget — nothing safe to drop.
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            cur_tokens = self.calc_tokens() if precise else cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        """Token count of the current message history under self.model's encoding."""
        return num_tokens_from_messages(self.messages, self.model)
# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
    """Return the number of tokens used by a list of chat *messages*.

    Known model aliases are normalized to "gpt-3.5-turbo" or "gpt-4" first;
    unknown models fall back to the gpt-3.5-turbo accounting with a warning.
    """
    import tiktoken

    # Normalize aliased model names onto the two base accounting schemes.
    if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo"]:
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k"]:
        return num_tokens_from_messages(messages, model="gpt-4")

    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        logger.debug("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")

    if model == "gpt-3.5-turbo":
        per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4":
        per_message = 3
        per_name = 1
    else:
        logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")

    total = 3  # every reply is primed with <|start|>assistant<|message|>
    for message in messages:
        total += per_message
        for key, value in message.items():
            total += len(encoding.encode(value))
            if key == "name":
                total += per_name
    return total
@@ -11,10 +11,13 @@ def create_bot(bot_type): | |||||
:return: bot instance | :return: bot instance | ||||
""" | """ | ||||
if bot_type == const.BAIDU: | if bot_type == const.BAIDU: | ||||
# Baidu Unit对话接口 | |||||
from bot.baidu.baidu_unit_bot import BaiduUnitBot | |||||
# 替换Baidu Unit为Baidu文心千帆对话接口 | |||||
# from bot.baidu.baidu_unit_bot import BaiduUnitBot | |||||
# return BaiduUnitBot() | |||||
return BaiduUnitBot() | |||||
from bot.baidu.baidu_wenxin import BaiduWenxinBot | |||||
return BaiduWenxinBot() | |||||
elif bot_type == const.CHATGPT: | elif bot_type == const.CHATGPT: | ||||
# ChatGPT 网页端web接口 | # ChatGPT 网页端web接口 | ||||
@@ -64,15 +64,16 @@ class LinkAIBot(Bot, OpenAIImage): | |||||
session_id = context["session_id"] | session_id = context["session_id"] | ||||
session = self.sessions.session_query(query, session_id) | session = self.sessions.session_query(query, session_id) | ||||
model = conf().get("model") or "gpt-3.5-turbo" | |||||
# remove system message | # remove system message | ||||
if app_code and session.messages[0].get("role") == "system": | |||||
session.messages.pop(0) | |||||
if session.messages[0].get("role") == "system": | |||||
if app_code or model == "wenxin": | |||||
session.messages.pop(0) | |||||
body = { | body = { | ||||
"app_code": app_code, | "app_code": app_code, | ||||
"messages": session.messages, | "messages": session.messages, | ||||
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称 | |||||
"model": model, # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin | |||||
"temperature": conf().get("temperature"), | "temperature": conf().get("temperature"), | ||||
"top_p": conf().get("top_p", 1), | "top_p": conf().get("top_p", 1), | ||||
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 | "frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 | ||||
@@ -23,6 +23,8 @@ class Bridge(object): | |||||
self.btype["chat"] = const.OPEN_AI | self.btype["chat"] = const.OPEN_AI | ||||
if conf().get("use_azure_chatgpt", False): | if conf().get("use_azure_chatgpt", False): | ||||
self.btype["chat"] = const.CHATGPTONAZURE | self.btype["chat"] = const.CHATGPTONAZURE | ||||
if model_type in ["wenxin"]: | |||||
self.btype["chat"] = const.BAIDU | |||||
if conf().get("use_linkai") and conf().get("linkai_api_key"): | if conf().get("use_linkai") and conf().get("linkai_api_key"): | ||||
self.btype["chat"] = const.LINKAI | self.btype["chat"] = const.LINKAI | ||||
self.bots = {} | self.bots = {} | ||||
@@ -1,6 +1,7 @@ | |||||
{ | { | ||||
"open_ai_api_key": "YOUR API KEY", | "open_ai_api_key": "YOUR API KEY", | ||||
"model": "gpt-3.5-turbo", | "model": "gpt-3.5-turbo", | ||||
"channel_type": "wx", | |||||
"proxy": "", | "proxy": "", | ||||
"hot_reload": false, | "hot_reload": false, | ||||
"single_chat_prefix": [ | "single_chat_prefix": [ | ||||
@@ -16,7 +16,7 @@ available_setting = { | |||||
"open_ai_api_base": "https://api.openai.com/v1", | "open_ai_api_base": "https://api.openai.com/v1", | ||||
"proxy": "", # openai使用的代理 | "proxy": "", # openai使用的代理 | ||||
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称 | # chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称 | ||||
"model": "gpt-3.5-turbo", | |||||
"model": "gpt-3.5-turbo", # 还支持 gpt-3.5-turbo-16k, gpt-4, wenxin | |||||
"use_azure_chatgpt": False, # 是否使用azure的chatgpt | "use_azure_chatgpt": False, # 是否使用azure的chatgpt | ||||
"azure_deployment_id": "", # azure 模型部署名称 | "azure_deployment_id": "", # azure 模型部署名称 | ||||
"azure_api_version": "", # azure api版本 | "azure_api_version": "", # azure api版本 | ||||
@@ -51,6 +51,10 @@ available_setting = { | |||||
"presence_penalty": 0, | "presence_penalty": 0, | ||||
"request_timeout": 60, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间 | "request_timeout": 60, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间 | ||||
"timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试 | "timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试 | ||||
# Baidu 文心一言参数 | |||||
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型 | |||||
"baidu_wenxin_api_key": "", # Baidu api key | |||||
"baidu_wenxin_secret_key": "", # Baidu secret key | |||||
# 语音设置 | # 语音设置 | ||||
"speech_recognition": False, # 是否开启语音识别 | "speech_recognition": False, # 是否开启语音识别 | ||||
"group_speech_recognition": False, # 是否开启群组语音识别 | "group_speech_recognition": False, # 是否开启群组语音识别 | ||||
@@ -47,6 +47,8 @@ | |||||
例如输入 `$linkai app Kv2fXJcH`,即将当前群聊与 app_code为 Kv2fXJcH 的应用绑定。 | 例如输入 `$linkai app Kv2fXJcH`,即将当前群聊与 app_code为 Kv2fXJcH 的应用绑定。 | ||||
另外,还可以通过 `$linkai close` 来一键关闭linkai对话,此时就会使用默认的openai接口;同理,发送 `$linkai open` 可以再次开启。 | |||||
### 2.Midjourney绘画功能 | ### 2.Midjourney绘画功能 | ||||
指令格式: | 指令格式: | ||||
@@ -54,6 +56,8 @@ | |||||
``` | ``` | ||||
- 图片生成: $mj 描述词1, 描述词2.. | - 图片生成: $mj 描述词1, 描述词2.. | ||||
- 图片放大: $mju 图片ID 图片序号 | - 图片放大: $mju 图片ID 图片序号 | ||||
- 图片变换: $mjv 图片ID 图片序号 | |||||
- 重置: $mjr 图片ID | |||||
``` | ``` | ||||
例如: | 例如: | ||||
@@ -61,6 +65,11 @@ | |||||
``` | ``` | ||||
"$mj a little cat, white --ar 9:16" | "$mj a little cat, white --ar 9:16" | ||||
"$mju 1105592717188272288 2" | "$mju 1105592717188272288 2" | ||||
"$mjv 11055927171882 2" | |||||
"$mjr 11055927171882" | |||||
``` | ``` | ||||
注:开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。 | |||||
注: | |||||
1. 开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。 | |||||
2. 提示词内容中包含敏感词或者参数格式错误可能导致绘画失败,生成失败不消耗积分 | |||||
3. 使用 `$mj open` 和 `$mj close` 指令可以快速打开和关闭绘图功能 |
@@ -130,8 +130,11 @@ class LinkAI(Plugin): | |||||
help_text = "用于集成 LinkAI 提供的知识库、Midjourney绘画等能力。\n\n" | help_text = "用于集成 LinkAI 提供的知识库、Midjourney绘画等能力。\n\n" | ||||
if not verbose: | if not verbose: | ||||
return help_text | return help_text | ||||
help_text += f'📖 知识库\n - 群聊中指定应用: {trigger_prefix}linkai app 应用编码\n\n例如: \n"$linkai app Kv2fXJcH"\n\n' | |||||
help_text += f"🎨 绘画\n - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: mjv 图片ID 图片序号\n - 重置: mjr 图片ID" | |||||
help_text += f'📖 知识库\n - 群聊中指定应用: {trigger_prefix}linkai app 应用编码\n' | |||||
help_text += f' - {trigger_prefix}linkai open: 开启对话\n' | |||||
help_text += f' - {trigger_prefix}linkai close: 关闭对话\n' | |||||
help_text += f'\n例如: \n"{trigger_prefix}linkai app Kv2fXJcH"\n\n' | |||||
help_text += f"🎨 绘画\n - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: {trigger_prefix}mjv 图片ID 图片序号\n - 重置: {trigger_prefix}mjr 图片ID" | |||||
help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\"" | help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\"" | ||||
help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\"" | help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\"" | ||||
return help_text | return help_text | ||||