# custom_agent.py

# basic imports
from pathlib import Path
import warnings, ast, re, os, uuid, logging, io, secrets, atexit, docker, inspect, asyncio, json
from pprint import pprint
from typing import Callable, Dict, Literal, Optional, Union, List, Tuple, Any

# autogen imports
from autogen import ConversableAgent, Agent, UserProxyAgent
from autogen.io.base import IOStream
from autogen.formatting_utils import colored
from autogen.coding.docker_commandline_code_executor import _wait_for_ready
from autogen.coding.jupyter import DockerJupyterServer, JupyterCodeExecutor
from autogen._pydantic import model_dump
from autogen.coding.jupyter.base import JupyterConnectable, JupyterConnectionInfo
from autogen.runtime_logging import logging_enabled, log_new_agent
from autogen.coding.base import CodeBlock, IPythonCodeResult
from autogen.coding.utils import silence_pip
from qwen_function_call import Message, ASSISTANT, messages_process

__all__ = ("fnc_agent",)

class fnc_agent(ConversableAgent):
    DEFAULT_SYSTEM_MESSAGE = """You are a helpful AI assistant.
Solve tasks using your coding and language skills.
In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use the 'print' function for the output when relevant. Check the execution result returned by the user.
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumptions, collect additional info you need, and think of a different approach to try.
When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
Reply "TERMINATE" in the end when everything is done.
"""
    DEFAULT_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong function call skills"

    def __init__(
        self,
        name: str,
        system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
        llm_config: Optional[Union[Dict, Literal[False]]] = None,
        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
        max_consecutive_auto_reply: Optional[int] = None,
        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
        description: Optional[str] = None,
        **kwargs,
    ):
        """
        Args:
            name (str): agent name.
            system_message (str): system message for the ChatCompletion inference.
                Please override this attribute if you want to reprogram the agent.
            llm_config (dict or False or None): llm inference configuration.
                Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
                for available options.
            is_termination_msg (function): a function that takes a message in the form of a dictionary
                and returns a boolean value indicating if this received message is a termination message.
                The dict can contain the following keys: "content", "role", "name", "function_call".
            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
                Defaults to None (no limit provided; the class attribute MAX_CONSECUTIVE_AUTO_REPLY
                will be used as the limit in this case).
                The limit only plays a role when human_input_mode is not "ALWAYS".
            **kwargs (dict): Please refer to other kwargs in
                [ConversableAgent](conversable_agent#__init__).
        """
        super().__init__(
            name,
            system_message,
            is_termination_msg,
            max_consecutive_auto_reply,
            human_input_mode,
            llm_config=llm_config,
            description=description,
            **kwargs,
        )
        if logging_enabled():
            log_new_agent(self, locals())
        # If no description was provided and the default system message is in use,
        # fall back to the default description.
        if description is None:
            if system_message == self.DEFAULT_SYSTEM_MESSAGE:
                self.description = self.DEFAULT_DESCRIPTION
        # Swap in this class's tool-call reply so that function-call arguments are
        # unicode-unescaped before execution.
        self.replace_reply_func(ConversableAgent.generate_tool_calls_reply, fnc_agent.generate_tool_calls_reply)

    def decode_values(self, d):
        """Recursively unicode-unescape every string value in a (possibly nested) dict."""
        for key, value in d.items():
            if isinstance(value, list):
                d[key] = [v.encode('utf-8').decode('unicode_escape') if isinstance(v, str) else v for v in value]
            elif isinstance(value, str):
                d[key] = value.encode('utf-8').decode('unicode_escape')
            elif isinstance(value, dict):
                self.decode_values(value)
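
    # Illustrative example (hypothetical input): after
    #     d = {"arguments": "\\u4f60\\u597d"}
    #     self.decode_values(d)
    # d["arguments"] holds the literal characters "你好" rather than the escape sequence.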

    def generate_tool_calls_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[Dict, None]]:
        """Generate a reply using tool call."""
        if config is None:
            config = self
        if messages is None:
            messages = self._oai_messages[sender]
        message = messages[-1]
        tool_returns = []
        for tool_call in message.get("tool_calls", []):
            function_call = tool_call.get("function", {})
            print(f'function_call before decoding: {function_call}')
            self.decode_values(function_call)
            print(f'function_call after decoding: {function_call}')
            func = self._function_map.get(function_call.get("name", None), None)
            if inspect.iscoroutinefunction(func):
                try:
                    # get the running loop if it was already created
                    loop = asyncio.get_running_loop()
                    close_loop = False
                except RuntimeError:
                    # create a loop if there is no running loop
                    loop = asyncio.new_event_loop()
                    close_loop = True
                _, func_return = loop.run_until_complete(self.a_execute_function(function_call))
                if close_loop:
                    loop.close()
            else:
                _, func_return = self.execute_function(function_call)
            content = func_return.get("content", "")
            if content is None:
                content = ""
            tool_call_id = tool_call.get("id", None)
            if tool_call_id is not None:
                tool_call_response = {
                    "tool_call_id": tool_call_id,
                    "role": "tool",
                    "content": content,
                }
            else:
                # Do not include tool_call_id if it is not present.
                # This is to make the tool call object compatible with the Mistral API.
                tool_call_response = {
                    "role": "tool",
                    "content": content,
                }
            tool_returns.append(tool_call_response)
        if tool_returns:
            return True, {
                "role": "tool",
                "tool_responses": tool_returns,
                "content": "\n\n".join([self._str_for_tool_response(tool_return) for tool_return in tool_returns]),
            }
        return False, None
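
    # Note: the dict returned above follows autogen's tool-result convention: one
    # "tool"-role message whose "tool_responses" list carries an entry per tool_call,
    # with the joined text in "content" for clients without structured tool responses.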

    def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> Union[str, Dict, None]:
        use_tool, tool_name, tool_args = None, None, None
        has_tool_response = False
        func = [s['function'] for s in self.llm_config['tools']]
        generate_cfg = {'stop': ['✿RESULT✿', '✿RETURN✿']}
        all_messages = []
        for message in messages:
            tool_responses = message.get("tool_responses", [])
            if tool_responses:
                all_messages += tool_responses
                has_tool_response = True
                # tool role on the parent message means the content is just a
                # concatenation of all of the tool_responses
                if message.get("role") != "tool":
                    all_messages.append({key: message[key] for key in message if key != "tool_responses"})
            else:
                all_messages.append(message)
        if not has_tool_response:
            # No tool results yet: render the tool schema into a Qwen-style
            # function-call prompt, then parse the completion for a tool invocation.
            messages = messages_process().preprocess(messages, func=func)
            response = llm_client.create(
                context=messages[-1].pop("context", None), messages=messages, cache=None, agent=self)
            response_new = [Message(ASSISTANT, response.choices[0].message.content)]
            output = messages_process().post_process(response_new, generate_cfg=generate_cfg)
            for out in output:
                use_tool, tool_name, tool_args, _ = messages_process.detect_tool(out)
            if use_tool:
                extracted_response = messages_process.create_chat_completion_message("assistant", tool_calls=[
                    {
                        "name": tool_name,
                        "arguments": tool_args,
                    }])
            else:
                extracted_response = llm_client.extract_text_or_completion_object(response)[0]
        else:
            response = llm_client.create(
                context=messages[-1].pop("context", None), messages=all_messages, cache=cache, agent=self
            )
            extracted_response = llm_client.extract_text_or_completion_object(response)[0]
        if extracted_response is None:
            warnings.warn(f"Extracted_response from {response} is None.", UserWarning)
            return None
        # ensure function and tool calls will be accepted when sent back to the LLM
        if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
            extracted_response = model_dump(extracted_response)
        if isinstance(extracted_response, dict):
            if extracted_response.get("function_call"):
                extracted_response["function_call"]["name"] = self._normalize_name(
                    extracted_response["function_call"]["name"]
                )
            for tool_call in extracted_response.get("tool_calls") or []:
                tool_call["function"]["name"] = self._normalize_name(tool_call["function"]["name"])
                # Remove id and type if they are not present.
                # This is to make the tool call object compatible with the Mistral API.
                if tool_call.get("id") is None:
                    tool_call.pop("id")
                if tool_call.get("type") is None:
                    tool_call.pop("type")
        return extracted_response
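
# A minimal construction sketch for fnc_agent (the endpoint URL, model name, and tool
# schema below are placeholders, not part of this module):
#
#   assistant = fnc_agent(
#       name="assistant",
#       llm_config={
#           "config_list": [{"model": "Qwen", "base_url": "http://localhost:8000/v1", "api_key": "EMPTY"}],
#           "tools": [{"type": "function", "function": {"name": "my_tool", "description": "...", "parameters": {}}}],
#       },
#   )
#
# Note that _generate_oai_reply_from_client reads self.llm_config['tools'], so a
# "tools" entry is required for this agent.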

class jupyter_agent(ConversableAgent):
    def __init__(
        self,
        name: str,
        system_message: str | List | None = "You are a helpful AI Assistant.",
        is_termination_msg: Callable[[Dict], bool] | None = None,
        max_consecutive_auto_reply: int | None = None,
        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
        function_map: Dict[str, Callable[..., Any]] | None = None,
        code_execution_config: Dict | Literal[False] = False,
        llm_config: Dict | None | Literal[False] = None,
        default_auto_reply: str | Dict = "",
        description: str | None = None,
        chat_messages: Dict[Agent, List[Dict]] | None = None,
    ):
        # Maps variable names to the literal values assigned to them; used to resolve
        # filenames that generated code builds out of variables.
        self.var_dict = {}
        super().__init__(
            name,
            system_message,
            is_termination_msg,
            max_consecutive_auto_reply,
            human_input_mode,
            function_map,
            code_execution_config,
            llm_config=llm_config,
            default_auto_reply=default_auto_reply,
            description=description,
            chat_messages=chat_messages,
        )
        # Swap in the executor reply defined below so code execution also reports
        # the files the executed code wrote.
        self.replace_reply_func(ConversableAgent._generate_code_execution_reply_using_executor, jupyter_agent._generate_code_execution_reply_using_executor)

    def remove_comments(self, code):
        # strip multi-line (triple-quoted) comments
        code = re.sub(r'"""[\s\S]*?"""', '', code)
        code = re.sub(r"'''[\s\S]*?'''", '', code)
        # strip single-line comments and drop pip-install lines
        lines = code.split('\n')
        lines = [line.split('#')[0] for line in lines]
        lines = [i for i in lines if 'pip' not in i]
        return '\n'.join(lines)
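
    # Illustrative example (hypothetical input):
    #     remove_comments('x = 1  # note\n!pip install pandas')  ->  'x = 1  '
    # The split on '#' is naive: it also truncates '#' characters inside string literals.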

    def parse_expr(self, expr):
        """Evaluate a constant string expression: literals, known variables, and '+' concatenation."""
        if isinstance(expr, ast.Str):
            return expr.s
        elif isinstance(expr, ast.Name):
            return self.var_dict.get(expr.id, expr.id)
        elif isinstance(expr, ast.BinOp) and isinstance(expr.op, ast.Add):
            left = self.parse_expr(expr.left)
            right = self.parse_expr(expr.right)
            return str(left) + str(right)
        return None
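
    # Illustrative example: with self.var_dict == {'suffix': 'v1'}, parsing the
    # expression  'out_' + suffix  yields 'out_v1'.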

    def collect_variables(self, code):
        tree = ast.parse(code)
        for node in ast.walk(tree):
            if isinstance(node, ast.Assign):
                for target in node.targets:
                    if isinstance(target, ast.Name):
                        self.var_dict[target.id] = self.parse_expr(node.value)

    def parse_filename(self, match):
        try:
            # try to parse the first argument directly
            expr = ast.parse(match.split(',')[0]).body[0].value
            return self.parse_expr(expr)
        except SyntaxError:
            # if that fails (e.g. the comma split broke a nested expression),
            # try to parse the whole expression
            try:
                expr = ast.parse(match).body[0].value
                if isinstance(expr, ast.Call):
                    # for a call, return the first positional argument
                    if expr.args:
                        return self.parse_expr(expr.args[0])
                    elif expr.keywords:
                        # no positional arguments: look for a filename keyword
                        for keyword in expr.keywords:
                            if keyword.arg in ['filename', 'fname']:
                                return self.parse_expr(keyword.value)
                return None
            except Exception:
                return None

    def run_detect(self, code_blocks):
        """Scan executed python code blocks for file-writing calls and return the detected filenames."""
        if not isinstance(code_blocks, list):
            code_blocks = [code_blocks]
        df_files_all = []
        img_files_all = []
        html_files_all = []
        for code_block in code_blocks:
            if code_block.language == 'python':
                code = code_block.code
                # strip comments so commented-out writes are not detected
                code = self.remove_comments(code)
                code = '\n'.join(line.rstrip() for line in code.split('\n'))
                # collect variable assignments so variable-built filenames can be resolved
                self.collect_variables(code)
                # regex patterns for common file-writing calls
                df_pattern = r'\.to_(csv|excel|parquet|pickle|hdf|feather|sql)\s*\((.*?)\)'
                img_pattern = r'\.savefig\s*\((.*?)\)'
                html_pattern = r'\.to_html\s*\((.*?)\)'
                pyecharts_pattern = r'\.render\s*\((.*?)\)'
                # find all matches
                df_matches = re.findall(df_pattern, code)
                img_matches = re.findall(img_pattern, code)
                html_matches = re.findall(html_pattern, code)
                pyecharts_matches = re.findall(pyecharts_pattern, code)  # pyecharts .render() also writes HTML
                # resolve the filename of each match
                df_files = [self.parse_filename(match[1]) for match in df_matches]
                img_files = [self.parse_filename(match) for match in img_matches]
                html_files = [self.parse_filename(match) for match in html_matches]
                html_files += [self.parse_filename(match) for match in pyecharts_matches]
                df_files = [f for f in df_files if f]
                img_files = [f for f in img_files if f]
                html_files = [f for f in html_files if f]
                df_files_all.extend(df_files)
                img_files_all.extend(img_files)
                html_files_all.extend(html_files)
        data = {'data': df_files_all, 'image': img_files_all, 'html': html_files_all}
        return data
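
    # Illustrative example (hypothetical code block): for a block containing
    #     df.to_csv('result.csv')
    #     plt.savefig('fig.png')
    # run_detect returns {'data': ['result.csv'], 'image': ['fig.png'], 'html': []}.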

    async def _generate_code_execution_reply_using_executor(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Union[Dict, Literal[False]]] = None,
    ):
        """Generate a reply using the code executor, reporting any files the code wrote."""
        iostream = IOStream.get_default()
        if config is not None:
            raise ValueError("config is not supported for _generate_code_execution_reply_using_executor.")
        if self._code_execution_config is False:
            return False, None
        if messages is None:
            messages = self._oai_messages[sender]
        last_n_messages = self._code_execution_config.get("last_n_messages", "auto")
        if not (isinstance(last_n_messages, (int, float)) and last_n_messages >= 0) and last_n_messages != "auto":
            raise ValueError("last_n_messages must be either a non-negative integer, or the string 'auto'.")
        num_messages_to_scan = last_n_messages
        if last_n_messages == "auto":
            # Find when the agent last spoke
            num_messages_to_scan = 0
            for message in reversed(messages):
                if "role" not in message:
                    break
                elif message["role"] != "user":
                    break
                else:
                    num_messages_to_scan += 1
        num_messages_to_scan = min(len(messages), num_messages_to_scan)
        messages_to_scan = messages[-num_messages_to_scan:]
        # iterate through the last n messages in reverse
        # if code blocks are found, execute the code blocks and return the output
        # if no code blocks are found, continue
        for message in reversed(messages_to_scan):
            if not message["content"]:
                continue
            code_blocks = await asyncio.to_thread(self._code_executor.code_extractor.extract_code_blocks, message["content"])
            if len(code_blocks) == 0:
                continue
            num_code_blocks = len(code_blocks)
            if num_code_blocks == 1:
                iostream.print(
                    colored(
                        f"\n>>>>>>>> EXECUTING CODE BLOCK (inferred language is {code_blocks[0].language})...",
                        "green",
                    ),
                    flush=True,
                )
            else:
                iostream.print(
                    colored(
                        f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join([x.language for x in code_blocks])}])...",
                        "green",
                    ),
                    flush=True,
                )
            # found code blocks, execute code.
            code_result = await asyncio.to_thread(self._code_executor.execute_code_blocks, code_blocks)
            try:
                save_file = self.run_detect(code_blocks) if code_result.exit_code == 0 else None
            except Exception:
                save_file = None
            exitcode2str = "execution succeeded" if code_result.exit_code == 0 else "execution failed"
            if code_result.exit_code == 0:
                return True, f"exitcode: {code_result.exit_code} ({exitcode2str})\nCode output: {code_result.output} \n File output: {save_file}"
            else:
                return True, f"exitcode: {code_result.exit_code} ({exitcode2str})\nCode output: {code_result.output} \n"
        return False, None

class CustomDockerJupyterServer(DockerJupyterServer):
    DEFAULT_DOCKERFILE = """FROM quay.io/jupyter/docker-stacks-foundation

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

USER ${NB_UID}
RUN mamba install --yes jupyter_kernel_gateway ipykernel && \
    mamba clean --all -f -y && \
    fix-permissions "${CONDA_DIR}" && \
    fix-permissions "/home/${NB_USER}"

ENV TOKEN="UNSET"
CMD python -m jupyter kernelgateway --KernelGatewayApp.ip=0.0.0.0 \
    --KernelGatewayApp.port=8888 \
    --KernelGatewayApp.auth_token="${TOKEN}" \
    --JupyterApp.answer_yes=true \
    --JupyterWebsocketPersonality.list_kernels=true

EXPOSE 8888
WORKDIR "${HOME}"
"""

    class GenerateToken:
        pass

    def __init__(
        self,
        *,
        custom_image_name: Optional[str] = None,
        container_name: Optional[str] = None,
        work_dir: Union[Path, str] = Path("."),
        bind_dir: Optional[Union[Path, str]] = None,
        auto_remove: bool = True,
        stop_container: bool = True,
        docker_env: Dict[str, str] = {},
        token: Union[str, GenerateToken] = GenerateToken(),
    ):
        """Start a Jupyter kernel gateway server in a Docker container.

        Args:
            custom_image_name (Optional[str], optional): Custom image to use. If this is None,
                then the bundled image will be built and used. The default image is based on
                quay.io/jupyter/docker-stacks-foundation and extended to include jupyter_kernel_gateway.
            container_name (Optional[str], optional): Name of the container to start.
                A name will be generated if None.
            work_dir (Union[Path, str], optional): Host directory to mount into the container
                at /workspace. Created (and made world-writable) if needed.
            bind_dir (Optional[Union[Path, str]], optional): Host directory to bind-mount
                instead of work_dir. Defaults to work_dir.
            auto_remove (bool, optional): If true, the Docker container will be deleted
                when it is stopped.
            stop_container (bool, optional): If true, the container will be stopped,
                either on program exit or via the context manager.
            docker_env (Dict[str, str], optional): Extra environment variables to pass
                to the running Docker container.
            token (Union[str, GenerateToken], optional): Token to use for authentication.
                If GenerateToken is used, a random token will be generated. An empty string
                means unauthenticated access.
        """
        if isinstance(work_dir, str):
            work_dir = Path(work_dir)
        work_dir.mkdir(parents=True, exist_ok=True)
        if bind_dir is None:
            bind_dir = work_dir
        elif isinstance(bind_dir, str):
            bind_dir = Path(bind_dir)
        # make work_dir world-writable so the container user can write to the bind mount
        os.chmod(work_dir, 0o777)
        if container_name is None:
            container_name = f"autogen-jupyterkernelgateway-{uuid.uuid4()}"
        client = docker.from_env()
        if custom_image_name is None:
            image_name = "autogen-jupyterkernelgateway"
            # Make sure the image exists
            try:
                client.images.get(image_name)
            except docker.errors.ImageNotFound:
                # Build the image from this script's directory
                here = Path(__file__).parent
                dockerfile = io.BytesIO(self.DEFAULT_DOCKERFILE.encode("utf-8"))
                logging.info(f"Image {image_name} not found. Building it now.")
                client.images.build(path=str(here), fileobj=dockerfile, tag=image_name)
                logging.info(f"Image {image_name} built successfully.")
        else:
            image_name = custom_image_name
            # Check if the image exists
            try:
                client.images.get(image_name)
            except docker.errors.ImageNotFound:
                raise ValueError(f"Custom image {image_name} does not exist")
        # Note: the default token is this class's own GenerateToken, so accept both
        # this class's and the base class's marker type here.
        if isinstance(token, (CustomDockerJupyterServer.GenerateToken, DockerJupyterServer.GenerateToken)):
            self._token = secrets.token_hex(32)
        else:
            self._token = token
        # Run the container
        env = {"TOKEN": self._token}
        env.update(docker_env)
        container = client.containers.run(
            image_name,
            detach=True,
            auto_remove=auto_remove,
            environment=env,
            publish_all_ports=True,
            name=container_name,
            volumes={str(bind_dir.resolve()): {"bind": "/workspace", "mode": "rw"}},
            working_dir="/workspace",
        )
        _wait_for_ready(container)
        container_ports = container.ports
        self._port = int(container_ports["8888/tcp"][0]["HostPort"])
        self._container_id = container.id

        def cleanup() -> None:
            try:
                inner_container = client.containers.get(container.id)
                inner_container.stop()
            except docker.errors.NotFound:
                pass
            # a stopped container no longer needs the atexit hook
            atexit.unregister(cleanup)

        if stop_container:
            atexit.register(cleanup)
        self._cleanup_func = cleanup
        self._stop_container = stop_container

class CustomJupyterCodeExecutor(JupyterCodeExecutor):
    def __init__(self, jupyter_server: JupyterConnectable | JupyterConnectionInfo, kernel_name: str = "python3", timeout: int = 60, output_dir: Path | str = ...):
        super().__init__(jupyter_server, kernel_name, timeout, output_dir)

    def execute_code_blocks(self, code_blocks: List[CodeBlock]) -> IPythonCodeResult:
        """(Experimental) Execute a list of code blocks and return the result.

        This method executes a list of code blocks as cells in the Jupyter kernel.
        See: https://jupyter-client.readthedocs.io/en/stable/messaging.html
        for the message protocol.

        Args:
            code_blocks (List[CodeBlock]): A list of code blocks to execute.

        Returns:
            IPythonCodeResult: The result of the code execution.
        """
        self._jupyter_kernel_client.wait_for_ready()
        outputs = []
        output_files = []
        for code_block in code_blocks:
            code = silence_pip(code_block.code, code_block.language)
            print(f'{code} \n')  # echo the code being executed
            result = self._jupyter_kernel_client.execute(code, timeout_seconds=self._timeout)
            if result.is_ok:
                outputs.append(result.output)
                for data in result.data_items:
                    if data.mime_type == "image/png":
                        path = self._save_image(data.data)
                        outputs.append(f"Image data saved to {path}")
                        output_files.append(path)
                    elif data.mime_type == "text/html":
                        path = self._save_html(data.data)
                        outputs.append(f"HTML data saved to {path}")
                        output_files.append(path)
                    else:
                        outputs.append(json.dumps(data.data))
            else:
                # fail fast: return only the first line of the error for the first failing block
                error_message = result.output.split('\n')[0]
                return IPythonCodeResult(
                    exit_code=1,
                    output=error_message,
                )
        # strip the executor's "invalid syntax" wrapper noise from the outputs
        pattern = r"Error executing code: invalid syntax \((.+?), line \d+\)"
        result_output = "\n".join([re.sub(pattern, "", str(output)).strip('\n') for output in outputs])
        print(f'Code execution result: \n \n{result_output} \n')
        return IPythonCodeResult(
            exit_code=0, output=result_output, output_files=output_files
        )
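
# A minimal pairing sketch (assumes a running Docker daemon; all names are illustrative):
#
#   server = CustomDockerJupyterServer(work_dir="coding")
#   executor = CustomJupyterCodeExecutor(server, output_dir="coding")
#   runner = jupyter_agent(name="runner", code_execution_config={"executor": executor})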

class custom_proxy(UserProxyAgent, fnc_agent):
    # Default UserProxyAgent.description values, based on human_input_mode
    DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS = {
        "ALWAYS": "An attentive HUMAN user who can answer questions about the task, and can perform tasks such as running Python code or inputting command line commands at a Linux terminal and reporting back the execution results.",
        "TERMINATE": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
        "NEVER": "A computer terminal that performs no other action than running Python scripts (provided to it quoted in ```python code blocks), or sh shell scripts (provided to it quoted in ```sh code blocks).",
    }

    def __init__(
        self,
        name: str,
        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
        max_consecutive_auto_reply: Optional[int] = None,
        human_input_mode: Literal["ALWAYS", "TERMINATE", "NEVER"] = "ALWAYS",
        function_map: Optional[Dict[str, Callable]] = None,
        code_execution_config: Union[Dict, Literal[False]] = {},
        default_auto_reply: Optional[Union[str, Dict, None]] = "",
        llm_config: Optional[Union[Dict, Literal[False]]] = False,
        system_message: Optional[Union[str, List]] = "",
        description: Optional[str] = None,
    ):
        super().__init__(
            name=name,
            system_message=system_message,
            is_termination_msg=is_termination_msg,
            max_consecutive_auto_reply=max_consecutive_auto_reply,
            human_input_mode=human_input_mode,
            function_map=function_map,
            code_execution_config=code_execution_config,
            llm_config=llm_config,
            default_auto_reply=default_auto_reply,
            description=(
                description if description is not None else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode]
            ),
        )
        # Use fnc_agent's tool-call reply so tool arguments are unicode-unescaped here too.
        self.replace_reply_func(ConversableAgent.generate_tool_calls_reply, fnc_agent.generate_tool_calls_reply)

if __name__ == '__main__':
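    # End-to-end wiring sketch (commented out; assumes a running Docker daemon and an
    # OpenAI-compatible Qwen endpoint; the URL and model name are placeholders):
    #
    #   server = CustomDockerJupyterServer(work_dir="coding")
    #   executor = CustomJupyterCodeExecutor(server, output_dir="coding")
    #   assistant = fnc_agent(name="assistant", llm_config={
    #       "config_list": [{"model": "Qwen", "base_url": "http://localhost:8000/v1", "api_key": "EMPTY"}],
    #       "tools": []})
    #   user = jupyter_agent(name="user", human_input_mode="NEVER",
    #                        code_execution_config={"executor": executor})
    #   user.initiate_chat(assistant, message="Plot y = x**2 and save it as fig.png")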
    pass