
Merge branch 'master' into wechatmp

master · JS00000 · 1 year ago
commit 78d4c58b70
22 changed files with 391 additions and 56 deletions
  1. .github/ISSUE_TEMPLATE.md (+4 -3)
  2. .gitignore (+2 -1)
  3. README.md (+13 -5)
  4. bot/chatgpt/chat_gpt_bot.py (+2 -2)
  5. bot/session_manager.py (+1 -1)
  6. channel/chat_channel.py (+81 -3)
  7. channel/wechat/wechat_channel.py (+8 -13)
  8. channel/wechat/wechaty_channel.py (+8 -12)
  9. common/dequeue.py (+33 -0)
  10. common/log.py (+4 -0)
  11. config.py (+10 -2)
  12. docker/Dockerfile.alpine (+1 -0)
  13. docker/Dockerfile.debian (+2 -1)
  14. docker/Dockerfile.debian.latest (+1 -0)
  15. docker/Dockerfile.latest (+2 -1)
  16. plugins/godcmd/godcmd.py (+11 -1)
  17. plugins/tool/README.md (+60 -0)
  18. plugins/tool/__init__.py (+0 -0)
  19. plugins/tool/config.json.template (+8 -0)
  20. plugins/tool/tool.py (+119 -0)
  21. requirements-optional.txt (+21 -0)
  22. requirements.txt (+0 -11)

.github/ISSUE_TEMPLATE.md (+4 -3)

@@ -4,8 +4,9 @@
2. python is installed: version between 3.7 and 3.10
3. `git pull` to fetch the latest code
4. Run `pip3 install -r requirements.txt` and check that the dependencies are satisfied
5. No similar issue was found among existing issues
6. No similar problem in the [FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs)
5. For extended features, run `pip3 install -r requirements-optional.txt` and check that the dependencies are satisfied
6. No similar issue was found among existing issues
7. No similar problem in the [FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs)


### Problem description
@@ -18,7 +19,7 @@
### Terminal log (if there is an error)

```
[Paste the terminal log here]
[Paste the terminal log here; it can be found in the `run.log` file in the main directory]
```




.gitignore (+2 -1)

@@ -10,4 +10,5 @@ nohup.out
tmp
plugins.json
itchat.pkl
user_datas.pkl
user_datas.pkl
*.log

README.md (+13 -5)

@@ -65,7 +65,7 @@
### 2. Runtime environment

Supports Linux, MacOS and Windows (it can run long-term on a Linux server); `Python` must also be installed.
> Python 3.7.1~3.9.X is recommended; 3.10 and above works on MacOS, but it is not certain whether it runs properly on other systems.
> Python 3.7.1~3.9.X is recommended (3.8 preferred); 3.10 and above works on MacOS, but it is not certain whether it runs properly on other systems.

**(1) Clone the project code:**

@@ -75,14 +75,20 @@ cd chatgpt-on-wechat/
```

**(2) Install core dependencies (required):**
> The minimal set of dependencies needed to create a bot with `itchat` and chat via text.
```bash
pip3 install -r requirements.txt
```

Among them, `tiktoken` requires `python` 3.8 or above; it is used to count exactly how many tokens a session uses. It can be skipped, but installing it is recommended.
**(3) Extended dependencies (optional, recommended):**

```bash
pip3 install -r requirements-optional.txt
```
> If a dependency fails to install, comment out the corresponding line and continue.

Among them, `tiktoken` requires `python` 3.8 or above; it is used to count exactly how many tokens a session uses, and installing it is strongly recommended.

**(3) Extended dependencies (optional):**

`ffmpeg` is required for `google` or `baidu` speech recognition,

@@ -90,10 +96,12 @@ pip3 install -r requirements.txt

See [#415](https://github.com/zhayujie/chatgpt-on-wechat/issues/415)

To use the `azure` voice feature, install the dependency:
To use the `azure` voice feature, install the dependency (listed in `requirements-optional.txt`, but commented out there to ease `railway` deployment):

```bash
pip3 install azure-cognitiveservices-speech
```

> The currently published default image and the `railway` deployment are both based on `alpine` and cannot install the `azure` dependency. If you need it, build your own image based on [`debian`](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/docker/Dockerfile.debian.latest).
See the [documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python&tabs=linux%2Cubuntu%2Cdotnet%2Cjre%2Cmaven%2Cnodejs%2Cmac%2Cpypi)



bot/chatgpt/chat_gpt_bot.py (+2 -2)

@@ -91,8 +91,8 @@ class ChatGPTBot(Bot,OpenAIImage):
"top_p":1,
"frequency_penalty":conf().get('frequency_penalty', 0.0), # between [-2,2]; the higher the value, the more the model tends to produce different content
"presence_penalty":conf().get('presence_penalty', 0.0), # between [-2,2]; the higher the value, the more the model tends to produce different content
"request_timeout": 120, # request timeout
"timeout": 120, # retry timeout; requests are retried automatically within this window
"request_timeout": conf().get('request_timeout', 120), # request timeout; the openai API default is 600, and hard questions usually need more time
"timeout": conf().get('request_timeout', 120), # retry timeout; requests are retried automatically within this window
}

def reply_text(self, session:ChatGPTSession, session_id, api_key, retry_count=0) -> dict:


bot/session_manager.py (+1 -1)

@@ -50,7 +50,7 @@ class SessionManager(object):
'''
if session_id not in self.sessions:
self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session
elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session
self.sessions[session_id].set_system_prompt(system_prompt)
session = self.sessions[session_id]
return session


channel/chat_channel.py (+81 -3)

@@ -1,10 +1,12 @@



from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
import os
import re
import threading
import time
from common.expired_dict import ExpiredDict
from common.dequeue import Dequeue
from channel.channel import Channel
from bridge.reply import *
from bridge.context import *
@@ -20,8 +22,16 @@ except Exception as e:
class ChatChannel(Channel):
name = None # logged-in user name
user_id = None # logged-in user id
futures = {} # futures submitted to the thread pool per session_id; used to cancel not-yet-started futures when a session is reset (running ones are not cancelled)
sessions = {} # concurrency control: each session_id may have only one context being handled at a time
lock = threading.Lock() # guards access to sessions
handler_pool = ThreadPoolExecutor(max_workers=8) # thread pool for handling messages

def __init__(self):
pass
_thread = threading.Thread(target=self.consume)
_thread.setDaemon(True)
_thread.start()

# build the context from the message; content-related trigger logic goes here
def _compose_context(self, ctype: ContextType, content, **kwargs):
@@ -215,6 +225,74 @@ class ChatChannel(Channel):
time.sleep(3+3*retry_cnt)
self._send(reply, context, retry_cnt+1)

def thread_pool_callback(self, session_id):
def func(worker:Future):
try:
worker_exception = worker.exception()
if worker_exception:
logger.exception("Worker return exception: {}".format(worker_exception))
except CancelledError as e:
logger.info("Worker cancelled, session_id = {}".format(session_id))
except Exception as e:
logger.exception("Worker raise exception: {}".format(e))
with self.lock:
self.sessions[session_id][1].release()
return func

def produce(self, context: Context):
session_id = context['session_id']
with self.lock:
if session_id not in self.sessions:
self.sessions[session_id] = [Dequeue(), threading.BoundedSemaphore(conf().get("concurrency_in_session", 1))]
if context.type == ContextType.TEXT and context.content.startswith("#"):
self.sessions[session_id][0].putleft(context) # handle admin commands first
else:
self.sessions[session_id][0].put(context)

# consumer: runs in a separate thread, takes messages from the queue and handles them
def consume(self):
while True:
with self.lock:
session_ids = list(self.sessions.keys())
for session_id in session_ids:
context_queue, semaphore = self.sessions[session_id]
if semaphore.acquire(blocking = False): # the session may only be deleted after its worker threads have finished
if not context_queue.empty():
context = context_queue.get()
logger.debug("[WX] consume context: {}".format(context))
future:Future = self.handler_pool.submit(self._handle, context)
future.add_done_callback(self.thread_pool_callback(session_id))
if session_id not in self.futures:
self.futures[session_id] = []
self.futures[session_id].append(future)
elif semaphore._initial_value == semaphore._value+1: # no task other than the current acquire holds the semaphore, so all tasks are done
self.futures[session_id] = [t for t in self.futures[session_id] if not t.done()]
assert len(self.futures[session_id]) == 0, "thread pool error"
del self.sessions[session_id]
else:
semaphore.release()
time.sleep(0.1)

# cancel all tasks for session_id; only queued messages and tasks submitted to the pool but not yet started can be cancelled
def cancel_session(self, session_id):
with self.lock:
if session_id in self.sessions:
for future in self.futures[session_id]:
future.cancel()
cnt = self.sessions[session_id][0].qsize()
if cnt>0:
logger.info("Cancel {} messages in session {}".format(cnt, session_id))
self.sessions[session_id][0] = Dequeue()
def cancel_all_session(self):
with self.lock:
for session_id in self.sessions:
for future in self.futures[session_id]:
future.cancel()
cnt = self.sessions[session_id][0].qsize()
if cnt>0:
logger.info("Cancel {} messages in session {}".format(cnt, session_id))
self.sessions[session_id][0] = Dequeue()

def check_prefix(content, prefix_list):
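
The chat_channel.py hunk above introduces the new concurrency model: one message queue plus one BoundedSemaphore per session, a single consumer thread, and a shared handler pool. Below is a minimal standalone sketch of that pattern for orientation; the names (sessions, produce, consume, _release) and the simplified logic are illustrative stand-ins, not the project's actual class, and the real code additionally prioritises "#" commands with putleft and cleans up idle sessions.

```python
import threading
import time
from queue import Queue
from concurrent.futures import ThreadPoolExecutor

sessions = {}                 # session_id -> [Queue, BoundedSemaphore]
lock = threading.Lock()
pool = ThreadPoolExecutor(max_workers=8)
CONCURRENCY_IN_SESSION = 1    # mirrors the new "concurrency_in_session" config key

def produce(session_id, context):
    # called by a channel when a message arrives
    with lock:
        if session_id not in sessions:
            sessions[session_id] = [Queue(), threading.BoundedSemaphore(CONCURRENCY_IN_SESSION)]
        sessions[session_id][0].put(context)

def _release(session_id):
    # done-callback: free the session slot once handling has finished
    def cb(_future):
        sessions[session_id][1].release()
    return cb

def consume(handle):
    # single consumer thread: at most CONCURRENCY_IN_SESSION contexts per session are in flight
    while True:
        with lock:
            for session_id in list(sessions):
                queue, sem = sessions[session_id]
                if sem.acquire(blocking=False):
                    if not queue.empty():
                        future = pool.submit(handle, queue.get())
                        future.add_done_callback(_release(session_id))
                    else:
                        sem.release()
        time.sleep(0.1)

# usage sketch:
# threading.Thread(target=consume, args=(print,), daemon=True).start()
# produce("user1", "hello")
```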


channel/wechat/wechat_channel.py (+8 -13)

@@ -5,6 +5,7 @@ wechat channel
"""

import os
import threading
import requests
import io
import time
@@ -17,18 +18,10 @@ from lib import itchat
from lib.itchat.content import *
from bridge.reply import *
from bridge.context import *
from concurrent.futures import ThreadPoolExecutor
from config import conf
from common.time_check import time_checker
from common.expired_dict import ExpiredDict
from plugins import *
thread_pool = ThreadPoolExecutor(max_workers=8)

def thread_pool_callback(worker):
worker_exception = worker.exception()
if worker_exception:
logger.exception("Worker return exception: {}".format(worker_exception))


@itchat.msg_register(TEXT)
def handler_single_msg(msg):
@@ -73,7 +66,9 @@ def qrCallback(uuid,status,qrcode):
try:
from PIL import Image
img = Image.open(io.BytesIO(qrcode))
thread_pool.submit(img.show,"QRCode")
_thread = threading.Thread(target=img.show, args=("QRCode",))
_thread.setDaemon(True)
_thread.start()
except Exception as e:
pass

@@ -142,7 +137,7 @@ class WechatChannel(ChatChannel):
logger.debug("[WX]receive voice msg: {}".format(cmsg.content))
context = self._compose_context(ContextType.VOICE, cmsg.content, isgroup=False, msg=cmsg)
if context:
thread_pool.submit(self._handle, context).add_done_callback(thread_pool_callback)
self.produce(context)

@time_checker
@_check
@@ -150,7 +145,7 @@ class WechatChannel(ChatChannel):
logger.debug("[WX]receive text msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
context = self._compose_context(ContextType.TEXT, cmsg.content, isgroup=False, msg=cmsg)
if context:
thread_pool.submit(self._handle, context).add_done_callback(thread_pool_callback)
self.produce(context)

@time_checker
@_check
@@ -158,7 +153,7 @@ class WechatChannel(ChatChannel):
logger.debug("[WX]receive group msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
context = self._compose_context(ContextType.TEXT, cmsg.content, isgroup=True, msg=cmsg)
if context:
thread_pool.submit(self._handle, context).add_done_callback(thread_pool_callback)
self.produce(context)
@time_checker
@_check
@@ -168,7 +163,7 @@ class WechatChannel(ChatChannel):
logger.debug("[WX]receive voice for group msg: {}".format(cmsg.content))
context = self._compose_context(ContextType.VOICE, cmsg.content, isgroup=True, msg=cmsg)
if context:
thread_pool.submit(self._handle, context).add_done_callback(thread_pool_callback)
self.produce(context)
# unified send function; each Channel implements it and sends different message types according to the reply's type field
def send(self, reply: Reply, context: Context):


channel/wechat/wechaty_channel.py (+8 -12)

@@ -5,7 +5,6 @@ wechaty channel
Python Wechaty - https://github.com/wechaty/python-wechaty
"""
import base64
from concurrent.futures import ThreadPoolExecutor
import os
import time
import asyncio
@@ -18,21 +17,18 @@ from bridge.context import *
from channel.chat_channel import ChatChannel
from channel.wechat.wechaty_message import WechatyMessage
from common.log import logger
from common.singleton import singleton
from config import conf
try:
from voice.audio_convert import any_to_sil
except Exception as e:
pass

thread_pool = ThreadPoolExecutor(max_workers=8)
def thread_pool_callback(worker):
worker_exception = worker.exception()
if worker_exception:
logger.exception("Worker return exception: {}".format(worker_exception))
@singleton
class WechatyChannel(ChatChannel):

def __init__(self):
pass
super().__init__()

def startup(self):
config = conf()
@@ -41,6 +37,10 @@ class WechatyChannel(ChatChannel):
asyncio.run(self.main())

async def main(self):
loop = asyncio.get_event_loop()
# pass the asyncio loop into the handler threads
self.handler_pool._initializer= lambda: asyncio.set_event_loop(loop)
self.bot = Wechaty()
self.bot.on('login', self.on_login)
self.bot.on('message', self.on_message)
@@ -122,8 +122,4 @@ class WechatyChannel(ChatChannel):
context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)
if context:
logger.info('[WX] receiveMsg={}, context={}'.format(cmsg, context))
thread_pool.submit(self._handle_loop, context, asyncio.get_event_loop()).add_done_callback(thread_pool_callback)

def _handle_loop(self,context,loop):
asyncio.set_event_loop(loop)
self._handle(context)
self.produce(context)
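
The wechaty_channel.py change above drops the per-task _handle_loop wrapper and instead installs the main asyncio loop into every worker thread via the pool's initializer. A hedged, self-contained sketch of that technique follows; the send/handle names and the echo coroutine are illustrative, not the project's API.

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor

async def main():
    loop = asyncio.get_event_loop()
    # every worker thread adopts the main event loop as its current loop
    pool = ThreadPoolExecutor(max_workers=8,
                              initializer=lambda: asyncio.set_event_loop(loop))

    async def send(msg):
        await asyncio.sleep(0)
        print("sent:", msg)

    def handle(msg):
        # runs in a pool thread; get_event_loop() now returns the main loop
        worker_loop = asyncio.get_event_loop()
        asyncio.run_coroutine_threadsafe(send(msg), worker_loop).result()

    await loop.run_in_executor(pool, handle, "hello")

asyncio.run(main())
```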

common/dequeue.py (+33 -0)

@@ -0,0 +1,33 @@

from queue import Full, Queue
from time import monotonic as time

# add implementation of putleft to Queue
class Dequeue(Queue):
def putleft(self, item, block=True, timeout=None):
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._putleft(item)
self.unfinished_tasks += 1
self.not_empty.notify()

def putleft_nowait(self, item):
return self.putleft(item, block=False)

def _putleft(self, item):
self.queue.appendleft(item)
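
A quick usage sketch for the new Dequeue above (assuming it is imported as common.dequeue, as elsewhere in this commit): putleft() pushes an item to the front of an otherwise FIFO queue, which is how chat_channel.py lets "#" admin commands jump ahead of ordinary messages.

```python
from common.dequeue import Dequeue

q = Dequeue()
q.put("hello")
q.put("how are you")
q.putleft("#reset")   # admin command jumps the queue

print(q.get())        # -> "#reset"
print(q.get())        # -> "hello"
```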

common/log.py (+4 -0)

@@ -8,6 +8,10 @@ def _get_logger():
console_handle = logging.StreamHandler(sys.stdout)
console_handle.setFormatter(logging.Formatter('[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'))
file_handle = logging.FileHandler('run.log', encoding='utf-8')
file_handle.setFormatter(logging.Formatter('[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'))
log.addHandler(file_handle)
log.addHandler(console_handle)
return log



config.py (+10 -2)

@@ -1,6 +1,7 @@
# encoding:utf-8

import json
import logging
import os
from common.log import logger
import pickle
@@ -28,6 +29,7 @@ available_setting = {
"group_chat_in_one_session": ["ChatGPT测试群"], # names of groups whose sessions share context
"trigger_by_self": False, # whether the bot's own messages may trigger it
"image_create_prefix": ["画", "看", "找"], # prefixes that enable image replies
"concurrency_in_session": 1, # max number of messages being handled at the same time in one session; values above 1 may break ordering

# chatgpt session parameters
"expires_in_seconds": 3600, # expiry time for idle sessions
@@ -38,12 +40,13 @@ available_setting = {
"rate_limit_chatgpt": 20, # rate limit for chatgpt calls
"rate_limit_dalle": 50, # rate limit for openai dalle calls


# chatgpt api parameters, see https://platform.openai.com/docs/api-reference/chat/create
"temperature": 0.9,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"request_timeout": 120, # chatgpt request timeout; the openai API default is 600, and hard questions usually need more time
"timeout": 120, # chatgpt retry timeout; requests are retried automatically within this window

# voice settings
"speech_recognition": False, # whether to enable speech recognition
@@ -79,11 +82,12 @@ available_setting = {
"wechatmp_token": "", # Token for the WeChat Official Account platform

# custom trigger words for chatgpt commands
"clear_memory_commands": ['#清除记忆'], # session reset command
"clear_memory_commands": ['#清除记忆'], # session reset command; must start with #

# channel configuration
"channel_type": "wx", # channel type; supported: {wx,wxy,terminal,wechatmp}

"debug": False, # whether to enable debug mode; more logs are printed when enabled

}

@@ -170,6 +174,10 @@ def load_config():
else:
config[name] = value

if config.get("debug", False):
logger.setLevel(logging.DEBUG)
logger.debug("[INIT] set log level to DEBUG")

logger.info("[INIT] load config: {}".format(config))

config.load_user_datas()
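
For orientation, a config.json fragment exercising the keys touched by this commit might look as follows; the values are illustrative, not recommended defaults beyond what the diff itself shows.

```json
{
  "channel_type": "wx",
  "concurrency_in_session": 1,
  "request_timeout": 120,
  "timeout": 120,
  "clear_memory_commands": ["#清除记忆"],
  "debug": true
}
```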


docker/Dockerfile.alpine (+1 -0)

@@ -23,6 +23,7 @@ RUN apk add --no-cache \
&& cp config-template.json ${BUILD_PREFIX}/config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt \
&& apk del curl wget

WORKDIR ${BUILD_PREFIX}


docker/Dockerfile.debian (+2 -1)

@@ -23,7 +23,8 @@ RUN apt-get update \
&& cd ${BUILD_PREFIX} \
&& cp config-template.json ${BUILD_PREFIX}/config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt

WORKDIR ${BUILD_PREFIX}



docker/Dockerfile.debian.latest (+1 -0)

@@ -16,6 +16,7 @@ RUN apt-get update \
&& cp config-template.json config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt \
&& pip install azure-cognitiveservices-speech
WORKDIR ${BUILD_PREFIX}


docker/Dockerfile.latest (+2 -1)

@@ -13,7 +13,8 @@ RUN apk add --no-cache bash ffmpeg espeak \
&& cd ${BUILD_PREFIX} \
&& cp config-template.json config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt

WORKDIR ${BUILD_PREFIX}



plugins/godcmd/godcmd.py (+11 -1)

@@ -147,7 +147,14 @@ class Godcmd(Plugin):
else:
with open(config_path,"r") as f:
gconf=json.load(f)
custom_commands = conf().get("clear_memory_commands", [])
for custom_command in custom_commands:
if custom_command and custom_command.startswith("#"):
custom_command = custom_command[1:]
if custom_command and custom_command not in COMMANDS["reset"]["alias"]:
COMMANDS["reset"]["alias"].append(custom_command)

self.password = gconf["password"]
self.admin_users = gconf["admin_users"] # pre-stored admin accounts that need no authentication. TODO: user names change on every login, so this is currently unusable
self.isrunning = True # whether the bot is running
@@ -167,6 +174,7 @@ class Godcmd(Plugin):
logger.debug("[Godcmd] on_handle_context. content: %s" % content)
if content.startswith("#"):
# msg = e_context['context']['msg']
channel = e_context['channel']
user = e_context['context']['receiver']
session_id = e_context['context']['session_id']
isgroup = e_context['context']['isgroup']
@@ -216,6 +224,7 @@ class Godcmd(Plugin):
elif cmd == "reset":
if bottype in (const.CHATGPT, const.OPEN_AI):
bot.sessions.clear_session(session_id)
channel.cancel_session(session_id)
ok, result = True, "会话已重置"
else:
ok, result = False, "当前对话机器人不支持重置会话"
@@ -237,6 +246,7 @@ class Godcmd(Plugin):
ok, result = True, "配置已重载"
elif cmd == "resetall":
if bottype in (const.CHATGPT, const.OPEN_AI):
channel.cancel_all_session()
bot.sessions.clear_all_session()
ok, result = True, "重置所有会话成功"
else:


plugins/tool/README.md (+60 -0)

@@ -0,0 +1,60 @@
## Plugin description
A plugin that lets chatgpt go online, search, and do numeric computation, giving it powerful and rich extension capabilities
To use the plugin, prefix the message with $tool when triggering a bot reply
### All tools of this plugin are also kept in a dedicated repository: [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)
## Usage
After enabling the plugin, 4 tools are used by default; they need no extra configuration and stay effective:
### 1. python
###### A python interpreter; use it to execute python instructions, for example to run the code you want chatgpt to generate and return its output or perform tasks
### 2. requests
###### Usually used to fetch the concrete content of a website; results may be affected by anti-crawler measures

### 3. terminal
###### Executes shell commands on the machine the bot runs on; combine it with code you want chatgpt to generate for natural-language control

### 4. meteo-weather
###### Answers your weather questions; it needs time and location context. This tool uses the [meteo open api](https://open-meteo.com/)
Note: this tool needs time and location information, and the data it fetches is not guaranteed to be accurate

## Prompting tips for this plugin
### 1. Guided questions
#### For example:
- Summarize the content of this link https://github.com/goldfishh/chatgpt-tool-hub
- Use Terminal to run curl cip.cc
- Use python to find today's date
### 2. Use a search-engine tool
- A search tool lets chatgpt obtain context you have not stated clearly; for example chatgpt does not know your location or the current time, so without it it cannot look up the weather
## Other tools
###### Besides the above there are other tools, such as web search, math and news, which require obtaining an api-key,
###### Since the usage of these tools is still being documented, please do not try them if you are not familiar with them
#### [How to apply for keys](https://github.com/goldfishh/chatgpt-tool-hub/blob/master/docs/apply_optional_tool.md)

### 5. wikipedia
###### Answers questions about the specific people, events and things you want to know about
## config.json configuration
###### The default tools need no configuration; other tools must be configured manually. An example:
```json
{
"tools": ["wikipedia"],
"kwargs": {
"top_k_results": 2,
"no_default": false,
"model_name": "gpt-3.5-turbo"
}
}
```
Note: the config.json file is optional; the tool can still be used without it
- `tools`: tools loaded when the plugin initializes; the current optional set is ["wikipedia", "wolfram-alpha", "bing-search", "google-search", "news"], and the last 4 require applying for a service api
- `kwargs`: configuration used when the tools run; api-keys and environment settings usually go here. no_default controls whether the 4 default tools are used; if it is false, only the tools in the tools list are used
## Notes
- Although I will deliberately add some restrictions, please do not use this plugin to harm others; find out in advance whether certain content violates the relevant rules, and it is recommended to filter beforehand
- Over the coming period I will implement some interesting tools, such as stable diffusion Chinese prompt translation and CV model inference; friends with ideas are welcome to follow along and extend this project together

plugins/tool/__init__.py (+0 -0)


plugins/tool/config.json.template (+8 -0)

@@ -0,0 +1,8 @@
{
"tools": ["python", "requests", "terminal", "meteo-weather"],
"kwargs": {
"top_k_results": 2,
"no_default": false,
"model_name": "gpt-3.5-turbo"
}
}

plugins/tool/tool.py (+119 -0)

@@ -0,0 +1,119 @@
import json
import os

from chatgpt_tool_hub.apps import load_app
from chatgpt_tool_hub.apps.app import App

import plugins
from bridge.bridge import Bridge
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common import const
from common.log import logger
from config import conf
from plugins import *


@plugins.register(name="tool", desc="Arming your ChatGPT bot with various tools", version="0.3", author="goldfishh", desire_priority=0)
class Tool(Plugin):
def __init__(self):
super().__init__()
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
os.environ["OPENAI_API_KEY"] = conf().get("open_ai_api_key", "")
os.environ["PROXY"] = conf().get("proxy", "")

self.app = self._reset_app()

logger.info("[tool] inited")

def get_help_text(self, **kwargs):
help_text = "这是一个能让chatgpt联网,搜索,数字运算的插件,将赋予强大且丰富的扩展能力"
return help_text

def on_handle_context(self, e_context: EventContext):
if e_context['context'].type != ContextType.TEXT:
return

# bots added in future extensions are not supported yet
if Bridge().get_bot_type("chat") not in (const.CHATGPT, const.OPEN_AI, const.CHATGPTONAZURE):
return

content = e_context['context'].content
content_list = e_context['context'].content.split(maxsplit=1)

if not content or len(content_list) < 1:
e_context.action = EventAction.CONTINUE
return

logger.debug("[tool] on_handle_context. content: %s" % content)
reply = Reply()
reply.type = ReplyType.TEXT

# todo: some tools require an api-key and need changes to the config file, so adding/removing tools via query is not implemented here
if content.startswith("$tool"):
if len(content_list) == 1:
logger.debug("[tool]: get help")
reply.content = self.get_help_text()
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
elif len(content_list) > 1:
if content_list[1].strip() == "reset":
logger.debug("[tool]: reset config")
self.app = self._reset_app()
reply.content = "重置工具成功"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
elif content_list[1].startswith("reset"):
logger.debug("[tool]: remind")
e_context['context'].content = "请你随机用一种聊天风格,提醒用户:如果想重置tool插件,reset之后不要加任何字符"

e_context.action = EventAction.BREAK
return

query = content_list[1].strip()

# Don't modify bot name
all_sessions = Bridge().get_bot("chat").sessions
user_session = all_sessions.session_query(query, e_context['context']['session_id']).messages

# chatgpt-tool-hub will reply you with many tools
logger.debug("[tool]: just-go")
try:
_reply = self.app.ask(query, user_session)
e_context.action = EventAction.BREAK_PASS
all_sessions.session_reply(_reply, e_context['context']['session_id'])
except Exception as e:
logger.exception(e)
logger.error(str(e))

e_context['context'].content = "请你随机用一种聊天风格,提醒用户:这个问题tool插件暂时无法处理"
reply.type = ReplyType.ERROR
e_context.action = EventAction.BREAK
return

reply.content = _reply
e_context['reply'] = reply
return

def _read_json(self) -> dict:
curdir = os.path.dirname(__file__)
config_path = os.path.join(curdir, "config.json")
tool_config = {
"tools": [],
"kwargs": {}
}
if not os.path.exists(config_path):
return tool_config
else:
with open(config_path, "r") as f:
tool_config = json.load(f)
return tool_config

def _reset_app(self) -> App:
tool_config = self._read_json()
kwargs = tool_config.get("kwargs", {})
if kwargs.get("model_name", "") == "":
kwargs["model_name"] = conf().get("model", "gpt-3.5-turbo")
return load_app(tools_list=tool_config.get("tools"), **tool_config.get("kwargs"))

requirements-optional.txt (+21 -0)

@@ -0,0 +1,21 @@
tiktoken>=0.3.2 # openai calculate token

#voice
pydub>=0.25.1 # need ffmpeg
SpeechRecognition # google speech to text
gTTS>=2.3.1 # google text to speech
pyttsx3>=2.90 # pyttsx3 text to speech
baidu_aip>=4.16.10 # baidu voice
# azure-cognitiveservices-speech # azure voice

# wechaty
wechaty>=0.10.7
wechaty_puppet>=0.4.23
pysilk_mod>=1.6.0 # needed by send voice

# sdwebui plugin
webuiapi>=0.6.2

# chatgpt-tool-hub plugin
--extra-index-url https://pypi.python.org/simple
chatgpt_tool_hub>=0.3.5

requirements.txt (+0 -11)

@@ -1,17 +1,6 @@
openai>=0.27.2
baidu_aip>=4.16.10
gTTS>=2.3.1
HTMLParser>=0.0.2
pydub>=0.25.1
PyQRCode>=1.2.1
pysilk>=0.0.1
pysilk_mod>=1.6.0
pyttsx3>=2.90
qrcode>=7.4.2
requests>=2.28.2
webuiapi>=0.6.2
wechaty>=0.10.7
wechaty_puppet>=0.4.23
chardet>=5.1.0
SpeechRecognition
tiktoken>=0.3.2
