import json

import autogen

from config import llm_config, llm_config_ds
from prompt import get_summary_system

# Agent that condenses retrieved reference material into per-source summaries.
search_answer = autogen.AssistantAgent(
    name="summary_content",
    llm_config=llm_config_ds,
    system_message=get_summary_system,
    code_execution_config=False,
    human_input_mode="NEVER",
)


async def get_content_summary(question, res_info, final_data):
    """Ask the summary agent to condense the retrieved material for `question`.

    `res_info` is the numbered reference text shown to the model; `final_data`
    maps each source key to its original chunk, in the same order as the
    numbering in `res_info`. Returns `(final_chunks, search_str)`, where
    `final_chunks` maps source keys to their summaries and `search_str` is the
    re-numbered reference text. On any failure, `({}, res_info)` is returned.
    """
    try:
        data = list(final_data.items())
        final_chunks = {}
        # Prompt layout: "问题" = question, "资料信息" = reference material.
        prompt = "问题:\n" + question + "\n资料信息:\n" + res_info
        answer = await search_answer.a_generate_reply(
            messages=[{"role": "user", "content": prompt}]
        )
        print(answer)
        print(type(answer))

        # The model is expected to reply with a flat JSON object mapping each
        # 1-based reference index to its summary, optionally wrapped in a
        # ```json fenced block.
        if "```json" in answer:
            answer = answer.split("```json")[1].split("```")[0]
            answer = json.loads(answer)
        elif "{" in answer:
            # Fallback: take the text between the first '{' and the first '}'.
            # This only works because the expected object has no nesting.
            answer = answer.split("{")[1].split("}")[0]
            answer = json.loads("{" + answer + "}")
        print(answer)

        # Map each 1-based index back to the corresponding source key.
        for k, v in answer.items():
            final_chunks[data[int(k) - 1][0]] = v

        search_str = "\n".join(
            f"[{i + 1}]: \n {k}: \n{s} \n"
            for i, (k, s) in enumerate(final_chunks.items())
        )
        return final_chunks, search_str
    except Exception as e:
        print(f"summary rewrite failed: {e}")
        return {}, res_info


if __name__ == "__main__":
    import asyncio

    # Smoke test: "英国的首都在哪里" = "Where is the capital of the UK?"
    answer = asyncio.run(
        search_answer.a_generate_reply(
            messages=[{"role": "user", "content": "英国的首都在哪里"}]
        )
    )
    print(answer)