import pprint
import urllib.parse
import json5
from qwen_agent.agents import Assistant
from qwen_agent.tools.base import BaseTool, register_tool
import requests
import baidusearch
import tqdm
# Usage example
# Step 1 (Optional): Add a custom tool named `my_image_gen`.
@register_tool('my_image_gen')
class MyImageGen(BaseTool):
    # The `description` tells the agent the functionality of this tool.
    description = ('AI painting (image generation) service: given a text description, '
                   'it returns the URL of an image drawn from that text.')
    # The `parameters` tell the agent what input parameters the tool has.
    parameters = [{
        'name': 'prompt',
        'type': 'string',
        'description': 'Detailed description of the desired image content, in English',
        'required': True
    }]

    def call(self, params: str, **kwargs) -> str:
        # `params` is the JSON string of arguments generated by the LLM agent.
        prompt = json5.loads(params)['prompt']
        # URL-encode the prompt so it can be embedded in the image URL.
        prompt = urllib.parse.quote(prompt)
        return json5.dumps(
            {'image_url': f'https://image.pollinations.ai/prompt/{prompt}'},
            ensure_ascii=False)
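# A hedged usage sketch (not part of the agent flow): the agent passes `params` as a
# JSON string, so a direct call would look roughly like this. It only builds a URL and
# performs no network request; `demo_params` is just an illustrative name.
# demo_params = json5.dumps({'prompt': 'a watercolor painting of a fox'}, ensure_ascii=False)
# print(MyImageGen().call(demo_params))  # -> {image_url: "https://image.pollinations.ai/prompt/..."}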
@register_tool('chaxun')
class WebSearch(BaseTool):
    # The `description` tells the agent the functionality of this tool.
    description = 'If you do not know the answer, use this tool to search the web.'
    # The `parameters` tell the agent what input parameters the tool has.
    parameters = [{
        'name': 'prompt',
        'type': 'string',
        'description': 'Describe the information you need to ask about, so that you can learn more about it.',
        'required': True
    }]

    def call(self, params: str, **kwargs) -> str:
        # `params` is the JSON string of arguments generated by the LLM agent.
        prompt = json5.loads(params)['prompt']
        # URL-encode the query before passing it to the search backend.
        prompt = urllib.parse.quote(prompt)
        results = baidusearch.search(prompt, num_results=20)
        print(results)
        # Return a string, as expected by the tool interface.
        return json5.dumps(results, ensure_ascii=False)
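# A hedged usage sketch (assumes the `baidusearch` package is installed and network
# access is available); the agent normally supplies the JSON params itself.
# demo_query = json5.dumps({'prompt': 'latest Qwen2 release notes'}, ensure_ascii=False)
# print(WebSearch().call(demo_query))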
# Step 2: Configure the LLM you are using.
# Fill in the model name and the model_server (the service hosting the model); if you have no server, you can use an api_key instead.
llm_cfg = {
    # Use the model service provided by DashScope:
    # model: the model name
    # model_server: the service hosting the model
    # api_key: the API key to use; it can be set explicitly or read from an environment variable
    'model': 'qwen2-72b-instruct',
    'model_server': 'DashScope',  # base_url, also known as api_base
    'api_key': 'YOUR_DASHSCOPE_API_KEY',
    # It will use the `DASHSCOPE_API_KEY` environment variable if 'api_key' is not set here.

    # Use a model service compatible with the OpenAI API, such as vLLM or Ollama:
    # 'model': 'Qwen1.5-7B-Chat',
    # 'model_server': 'http://localhost:8000/v1',  # base_url, also known as api_base
    # 'api_key': 'EMPTY',

    # (Optional) LLM hyperparameters for generation:
    'generate_cfg': {
        'top_p': 0.8
    }
}
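# A hedged alternative (sketch, not required by qwen_agent): read the key from the
# environment instead of writing it into the source file.
# import os
# llm_cfg['api_key'] = os.environ.get('DASHSCOPE_API_KEY', '')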
# Step 3: Create an agent. Here we use the `Assistant` agent as an example, which is capable of using tools and reading files.
# The system prompt for the agent.
system_instruction = '''
You are a helpful assistant.
After receiving a user request, think about whether a tool should be used.
If you cannot answer a question yourself, use the [chaxun] tool.
'''
# Tool list: the tools the Assistant is allowed to access; here it is just the custom `chaxun` search tool.
tools = ['chaxun']  # `code_interpreter` is a built-in tool that could also be listed here for executing code.
# File paths the assistant can read.
# files = ['./examples/resource/doc.pdf']  # Give the bot a PDF file to read.
# Initialize the Assistant.
bot = Assistant(llm=llm_cfg,
                system_message=system_instruction,
                function_list=tools,
                # files=files
                )
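# A minimal single-turn sketch, following the usual qwen_agent pattern of iterating
# `bot.run` (each iteration yields the response-so-far, so the last one is complete).
# Uncomment to try it; `demo_messages` is just an illustrative name.
# demo_messages = [{'role': 'user', 'content': 'Draw a dog and rotate it 90 degrees.'}]
# final_responses = []
# for final_responses in bot.run(messages=demo_messages):
#     pass
# print(final_responses)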
# Step 4: Run the agent as a chatbot.
messages = []  # This stores the chat history.

def getxinx(context):
    # For example, enter the query "draw a dog and rotate it 90 degrees".
    # query = input('user query: ')
    # Append the user query to the chat history.
    messages = [{'role': 'user', 'content': context}]
    print(messages)
    response = []
    event_id = 0
    # `bot.run` streams growing lists of response messages; collect them and tell
    # the caller to wait while generation is still in progress.
    for rsp in bot.run(messages=messages):
        response.append(rsp)
        yield 'Please wait...'
    # Yield the content of the final assistant message once streaming has finished.
    if response:
        yield response[-1][-1]['content']
    # Alternative server-sent-events style output, kept for reference:
    # for i in bot.run(messages=messages):
    #     print(i)
    #     print(i[len(i) - 1]['content'])
    #     event_id += 1
    #     yield f"id: {event_id}\n"
    #     yield f"event: time-update\n"
    #     if i[len(i) - 1]['role'] == 'assistant':
    #         # Send each chunk as soon as it is generated.
    #         yield "data: {}\n\n".format(str(i[len(i) - 1]['content'].replace('\n\n', '')))
    #     else:
    #         yield f"data: \n\n"
# Streaming output.