# tools.py

import os, asyncio
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from typing_extensions import Annotated
from agents import fnc_user_proxy, fnc_chatbot
from config import STATIC_DIR, BASE_UPLOAD_DIRECTORY
from typing import Tuple, Dict
from concurrent.futures import ThreadPoolExecutor
import gc

# @fnc_user_proxy.register_for_execution()
# @fnc_chatbot.register_for_llm(description="A dictionary representing a weather with location, date and unit of temperature")
# def get_current_weather(date: Annotated[str, "the date"], location: Annotated[str, "the location"],
#                         unit: Annotated[str, "the unit of temperature"]) -> dict:
#     """Get the current weather in a given location"""
#     return {
#         "location": location,
#         "unit": unit,
#         "date": date,
#         "temperature": 23
#     }


# @fnc_user_proxy.register_for_execution()
# @fnc_chatbot.register_for_llm(description="change_currency")
# def currency_calculator(base_amount: Annotated[float, "Amount of currency in base_currency"]) -> str:
#     base_currency = "USD"
#     quote_currency = "EUR"
#     if base_currency == quote_currency:
#         return f"{1.0 * base_amount} {quote_currency}"
#     elif base_currency == "USD" and quote_currency == "EUR":
#         return f"{1 / 1.1 * base_amount} {quote_currency}"
#     elif base_currency == "EUR" and quote_currency == "USD":
#         return f"{1.1 * base_amount} {quote_currency}"


# Generate the "pre-sale unshipped orders" report.
@fnc_user_proxy.register_for_execution()
@fnc_chatbot.register_for_llm(description="只做'预售欠发货报表'的工具, 只有用户明确要做'预售欠发货报表'的时候才调用")
def generate_presale_shipping(filepaths: Annotated[list, "文件路径"]) -> str:

    def process_sheet(file_path, sheet_name, date_column, merge_columns, df_gen_detail):
        try:
            df = pd.read_excel(file_path, sheet_name=sheet_name, engine='openpyxl')
            df['款色码'] = df['货号'].astype(str) + df['颜色编号'].astype(str) + '-' + df['尺码'].astype(str)
            df['款色号'] = df['货号'].astype(str) + df['颜色编号'].astype(str)
            freq = df['款色号'].value_counts()
            df[date_column] = pd.to_datetime(df[date_column])
            # Shift every date back by one day.
            df[date_column] = df[date_column] - pd.Timedelta(days=1)
            date_range = pd.date_range(start=df[date_column].min(), end=df[date_column].max(), freq='D')
            result = df.pivot_table(index='款色码', columns=pd.Grouper(key=date_column, freq='D'), aggfunc='size').reset_index()
            result['款色号'] = result['款色码'].apply(lambda x: x.split('-')[0])
            result = result.join(df_gen_detail[merge_columns].set_index('款色号'), on='款色号', rsuffix='_new')
            result['款色数'] = result['款色号'].map(freq)
            result['细码数'] = result[[col for col in result.columns if col in date_range]].sum(axis=1)
            tmp = result.duplicated(subset=['款色号', '款色数'], keep='first')
            tmp_1 = result.duplicated(subset=['款色号', '生产在途'], keep='first')
            result.loc[tmp, ['款色号', '款色数']] = np.nan
            result.loc[tmp_1, ['生产在途']] = np.nan
            result.loc[result['生产部门'].isna(), '生产部门'] = '无在途'
            # Reorder the columns.
            result = result.loc[:, ['款色号', '款色数', '款色码', '细码数'] + [col for col in result.columns if col not in ['款色号', '款色数', '款色码', '细码数']]]
            # Append a totals row.
            total_row = pd.DataFrame([['合计', None, None, None] +
                                      [result[col].sum() if isinstance(col, pd.Timestamp) else None
                                       for col in result.columns[4:]]],
                                     columns=result.columns)
            result = pd.concat([result, total_row], ignore_index=True)
            # Pull five columns out into a separate summary table.
            summary_columns = ['款色号', '款色数', '生产部门', '厂家&班组', '生产在途']
            summary_table = result[summary_columns].copy()
            summary_table = summary_table[~summary_table['款色号'].isna()]   # drop empty rows
            summary_table = summary_table[summary_table['款色号'] != '合计']  # drop the totals row
            # Rename the timestamp columns to plain date strings.
            for col in result.columns:
                if isinstance(col, pd.Timestamp):
                    result.rename(columns={col: col.strftime('%Y-%m-%d')}, inplace=True)
        except Exception as e:
            return pd.DataFrame(), pd.DataFrame(), str(e)
        return result, summary_table, ''
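
    # process_sheet returns (result, summary_table, error_message); on failure it
    # returns two empty DataFrames plus the exception text, which the caller joins
    # across both sheets and reports only when neither sheet produced a result.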
    file_path = filepaths[0].strip().strip("'")
    if not file_path.startswith('/'):
        file_path = '/' + file_path
    if not os.path.exists(file_path):
        print(file_path)
        return "文件路径不存在,请重新上传"
    try:
        df_gen_detail = pd.read_excel(file_path, sheet_name='款色数', engine='openpyxl')
        merge_columns = ['款色号', '生产部门', '厂家&班组', '生产在途']
        directory = os.path.dirname(os.path.dirname(file_path))
        oversea_path = os.path.join(directory, 'oversea_result.xlsx')
        domestic_path = os.path.join(directory, 'domestic_result.xlsx')
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        merged_path = os.path.join(directory, f'{timestamp}_merged_file.xlsx')
        result_overseas, summary_overseas, e_overseas = process_sheet(
            file_path, '海外明细', '预计发货日期', merge_columns, df_gen_detail)
        result_domestic, summary_domestic, e_domestic = process_sheet(
            file_path, '工厂预售发货跟进报表', '预计发货时间', merge_columns, df_gen_detail)
        error = e_overseas + '\n' + e_domestic
        if not result_overseas.empty and not result_domestic.empty:
            with pd.ExcelWriter(merged_path) as writer:
                result_overseas.to_excel(writer, sheet_name='海外结果', index=False)
                result_domestic.to_excel(writer, sheet_name='国内结果', index=False)
                summary_overseas.to_excel(writer, sheet_name='海外汇总', index=False)
                summary_domestic.to_excel(writer, sheet_name='国内汇总', index=False)
            return f"报表生成成功!{merged_path.replace(BASE_UPLOAD_DIRECTORY, STATIC_DIR)}"
        elif not result_domestic.empty:
            with pd.ExcelWriter(domestic_path) as writer:
                result_domestic.to_excel(writer, sheet_name='国内结果', index=False)
                summary_domestic.to_excel(writer, sheet_name='国内汇总', index=False)
            return f"仅国内报表生成成功!{domestic_path.replace(BASE_UPLOAD_DIRECTORY, STATIC_DIR)}"
        elif not result_overseas.empty:
            with pd.ExcelWriter(oversea_path) as writer:
                result_overseas.to_excel(writer, sheet_name='海外结果', index=False)
                summary_overseas.to_excel(writer, sheet_name='海外汇总', index=False)
            return f"仅海外报表生成成功!{oversea_path.replace(BASE_UPLOAD_DIRECTORY, STATIC_DIR)}"
        else:
            return error
    except Exception as e:
        return f"处理失败: {str(e)}"
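

# Usage sketch (path is illustrative only): the uploaded workbook is expected to
# contain the '款色数', '海外明细' and '工厂预售发货跟进报表' sheets, e.g.
#   generate_presale_shipping(["/path/to/upload/工厂预售发货跟进报表.xlsx"])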


# Process inventory data and generate the summary report.
@fnc_user_proxy.register_for_execution()
@fnc_chatbot.register_for_llm(description="只做'商品发货报表'的工具, 只有用户明确要做'商品发货报表'的时候才调用")
def process_inventory_data(filepaths: Annotated[list, "文件路径"]) -> str:
    """
    Process inventory data and generate the summary report (performance-optimized version).

    Args:
        filepaths (list): uploaded file paths; only the first Excel file is used.

    Returns:
        str: a result message containing the output file path.
    """
    # Compact dtypes to keep memory usage down while reading the sheets.
    dtypes = {
        '数量': 'float32',
        '实裁数量': 'float32',
        '库存合计': 'float32',
        '退货库存数量': 'float32',
        '前置仓全渠道配单|数量': 'float32'
    }
    file_path = filepaths[0].strip().strip("'")
    if not file_path.startswith('/'):
        file_path = '/' + file_path
    if not os.path.exists(file_path):
        print(file_path)
        return "文件路径不存在,请重新上传"
    directory = os.path.dirname(os.path.dirname(file_path))
    output_path = os.path.join(directory, 'output.xlsx')
    sheets = {}
    with pd.ExcelFile(file_path) as xls:
        for sheet_name in xls.sheet_names:
            # Only read the columns each sheet actually needs.
            if sheet_name == '1制单款号明细表':
                cols = ['制单号', '款号', '成品颜色编码', '尺码', '数量', '跟单组']
            elif sheet_name == '2外发加工单明细分析报表':
                cols = ['生产单号', '款号', '尺码', '实裁数量']
            elif sheet_name == '3成品外发入库明细分析报表':
                cols = ['制单号', '款号', '尺码', '数量']
            else:
                cols = None
            sheets[sheet_name] = pd.read_excel(
                xls,
                sheet_name=sheet_name,
                usecols=cols,
                dtype=dtypes,
                engine='openpyxl'
            )
    # Basic configuration
    sizes = ['XXS', 'XS', 'S', 'M', 'L', 'XL', 'XXL']

    def create_pivot(df: pd.DataFrame, value_col: str, skc_col: str = 'SKC',
                     size_col: str = '尺码', prefix: str = '') -> pd.DataFrame:
        """Optimized pivot-table builder."""
        # groupby + unstack instead of pivot_table for better performance.
        grouped = df.groupby([skc_col, size_col])[value_col].sum().unstack(fill_value=0)
        grouped = grouped.reindex(columns=sizes, fill_value=0)
        if prefix:
            grouped = grouped.rename(columns={size: f'{prefix}{size}' for size in sizes})
        # sum(axis=1) instead of apply for the row totals.
        grouped[f'{prefix}合计'] = grouped[[f'{prefix}{size}' for size in sizes]].sum(axis=1)
        return grouped
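
    # create_pivot returns one row per SKC with the per-size columns
    # f'{prefix}XXS' ... f'{prefix}XXL' plus a row total f'{prefix}合计';
    # sizes missing from the input are filled with 0 by the reindex above.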

    def process_transit_data(sheets: Dict[str, pd.DataFrame]) -> pd.DataFrame:
        """Optimized processing of the in-transit data."""
        # Work on copies to avoid SettingWithCopyWarning.
        df_1 = sheets['1制单款号明细表'][['制单号', '款号', '成品颜色编码', '尺码', '数量', '跟单组']].copy()
        df_2 = sheets['2外发加工单明细分析报表'][['生产单号', '款号', '尺码', '实裁数量']].copy()
        df_3 = sheets['3成品外发入库明细分析报表'][['制单号', '款号', '尺码', '数量']].copy()
        df_4 = sheets['10在途单次'][['款色号', '要求交期']].copy()
        df_5 = sheets['11已完成单号'][['制单号']].copy()
        # Assign with .loc to avoid chained-assignment warnings.
        df_1.loc[:, 'SKC'] = df_1['款号'] + df_1['成品颜色编码']
        df_1.loc[:, '款色号'] = df_1['SKC']
        df_1.loc[:, '色号'] = df_1['成品颜色编码']
        df_1.loc[:, '下单数'] = df_1['数量']
        # Rename, then merge the cut and warehoused quantities onto the orders.
        df_2.rename(columns={'生产单号': '制单号', '实裁数量': '裁数'}, inplace=True)
        df_3.rename(columns={'数量': '入仓'}, inplace=True)
        df_merged = pd.merge(df_1, df_2[['制单号', '款号', '尺码', '裁数']],
                             on=['制单号', '款号', '尺码'], how='left')
        df_merged.loc[:, '裁数'] = df_merged['裁数'].fillna(0)
        df_merged.loc[:, '裁数最终'] = df_merged['裁数'].where(df_merged['裁数'] != 0, df_merged['下单数'])
        df_merged = pd.merge(df_merged, df_3[['制单号', '款号', '尺码', '入仓']],
                             on=['制单号', '款号', '尺码'], how='left')
        df_merged.loc[:, '入仓'] = df_merged['入仓'].fillna(0)
        df_merged.loc[:, '裁-入仓'] = (df_merged['裁数最终'] - df_merged['入仓']).clip(lower=0)
        df_merged = pd.merge(df_merged, df_4, on=['款色号'], how='left')
        df_merged.loc[:, '是否在途'] = df_merged['要求交期'].notna().map({True: '是', False: '否'})
        df_merged.loc[:, '是否完成'] = df_merged['制单号'].isin(df_5['制单号']).map({True: '是', False: '否'})
        return df_merged.rename(columns={'跟单组': '营业组别', '要求交期': '预计交货日期'})
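
    # process_transit_data adds 裁数最终 (cut quantity, falling back to the order
    # quantity when the cut quantity is 0), 裁-入仓 (cut minus warehoused, floored
    # at 0) and the 是否在途 / 是否完成 flags used to filter the in-transit pivot below.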

    # Main processing pipeline
    transit_data = process_transit_data(sheets)
    # Build the two main pivots: pre-sale backlog and in-transit quantities.
    pivot_tasks = [
        (sheets['6预售欠发'], '前置仓全渠道配单|数量', 'SKC', '尺码', '预售数量'),
        (transit_data[(transit_data['是否在途'] == '是') & (transit_data['是否完成'] == '否')],
         '裁-入仓', 'SKC', '尺码', '在途数')
    ]
    presale_pivot = create_pivot(*pivot_tasks[0])
    transit_pivot = create_pivot(*pivot_tasks[1])
    # Shortage per size = in-transit quantity minus pre-sale backlog.
    shortage_pivot = pd.DataFrame(index=presale_pivot.index)
    for size in sizes:
        shortage_pivot[f'缺货数{size}'] = transit_pivot[f'在途数{size}'] - presale_pivot[f'预售数量{size}']
    shortage_pivot['缺货数合计'] = shortage_pivot[[f'缺货数{size}' for size in sizes]].sum(axis=1)
    # Inventory data: re-attach the channel classification before filtering.
    inventory = sheets['4库存']
    qudao = sheets['7渠道分类'][['渠道编号', '渠道']]
    qudao = qudao.rename(columns={'渠道': '是否店铺'})
    inventory = inventory.drop(columns=['是否店铺'])
    inventory = pd.merge(inventory, qudao, on=['渠道编号'], how='left')
    inventory_filters = [
        ((inventory['是否店铺'].isin(['实物仓', '海外调入'])) &
         (inventory['状态'] == '已审核'), '补货仓'),
        ((inventory['业态'] == 'QC仓') &
         (inventory['是否店铺'].isin(['退货仓', '海外调入'])) &
         (inventory['状态'] == '已审核'), '退货仓'),
        ((inventory['业态'].isin(['加盟店铺', '零售店铺'])) &
         (inventory['是否店铺'] == '店铺') &
         (inventory['状态'] == '已审核'), '店铺'),
        ((inventory['业态'].str.contains('海外仓')) &
         (inventory['状态'] == '已审核'), '海外仓')
    ]
    # One inventory pivot per warehouse/channel category.
    replenish_pivot = create_pivot(inventory[inventory_filters[0][0]], '库存合计', prefix=inventory_filters[0][1])
    return_pivot = create_pivot(inventory[inventory_filters[1][0]], '库存合计', prefix=inventory_filters[1][1])
    shop_pivot = create_pivot(inventory[inventory_filters[2][0]], '库存合计', prefix=inventory_filters[2][1])
    overseas_pivot = create_pivot(inventory[inventory_filters[3][0]], '库存合计', prefix=inventory_filters[3][1])
    # Returns that are still in transit
    return_transit_pivot = create_pivot(sheets['12退货在途'], '退货库存数量', prefix='退货在途')
    # Supplementary lookup tables
    frc_table = sheets['9班组'][["款色", "厂家&班组"]].drop_duplicates(subset=["款色"]).set_index("款色")
    time_table = sheets['13货品资料'][["货号", '颜色编号', "年份", "季节"]].copy()
    time_table['SKC'] = time_table['货号'].fillna('').astype(str) + time_table['颜色编号'].fillna('').astype(str)
    time_table.loc[:, '年份季节'] = time_table.loc[:, '年份'].fillna('').astype(str) + time_table.loc[:, '季节'].fillna('').astype(str)
    time_table = time_table[['SKC', '年份季节']].drop_duplicates(subset=["SKC"]).set_index("SKC")
    time_send_table = sheets['8工厂预售表'][["SKC", "预售发货时间备注"]].drop_duplicates(subset=["SKC"]).set_index("SKC")
    # Final shortage per size = shortage + replenishment + returns + returns in transit + shop stock.
    final_shortage_pivot = pd.DataFrame(index=presale_pivot.index)
    for size in sizes:
        final_shortage_pivot[f'最终缺货{size}'] = (
            shortage_pivot[f'缺货数{size}'] +
            replenish_pivot[f'补货仓{size}'] +
            return_pivot[f'退货仓{size}'] +
            return_transit_pivot[f'退货在途{size}'] +
            shop_pivot[f'店铺{size}']
        )
    final_shortage_pivot['最终缺货合计'] = final_shortage_pivot[[f'最终缺货{size}' for size in sizes]].sum(axis=1)
    # Combine everything into a single summary frame keyed by SKC.
    final_summary = pd.concat([
        presale_pivot, transit_pivot, frc_table, time_table, time_send_table,
        shortage_pivot, replenish_pivot, return_pivot, return_transit_pivot,
        shop_pivot, overseas_pivot, final_shortage_pivot
    ], axis=1)
    summary_2 = final_summary.groupby('年份季节')['预售数量合计'].sum().reset_index()
    # Save the results.
    with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
        final_summary.to_excel(writer, sheet_name='汇总', index=True)
        summary_2.to_excel(writer, sheet_name='季节汇总', index=False)
        transit_data.to_excel(writer, sheet_name='在途库存', index=True)
    # Release the large intermediates.
    del sheets, transit_data, final_summary, summary_2
    gc.collect()
    return f"报表生成成功!{output_path}"
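

# Note on inputs: process_inventory_data assumes the workbook contains the sheets
# referenced above ('1制单款号明细表', '2外发加工单明细分析报表', '3成品外发入库明细分析报表',
# '4库存', '6预售欠发', '7渠道分类', '8工厂预售表', '9班组', '10在途单次', '11已完成单号',
# '12退货在途', '13货品资料'); a missing sheet surfaces as an uncaught KeyError.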


# Generate the allocation mini-report.
@fnc_user_proxy.register_for_execution()
@fnc_chatbot.register_for_llm(description="只做'调拨小表'的工具, 只有用户明确要做'调拨小表'的时候才调用")
def allocate(filepaths: Annotated[list, "文件路径"]):
    # Resolve and validate the uploaded file path.
    file_path = filepaths[0].strip().strip("'")
    if not file_path.startswith('/'):
        file_path = '/' + file_path
    if not os.path.exists(file_path):
        print(file_path)
        return "文件路径不存在,请重新上传"
    directory = os.path.dirname(os.path.dirname(file_path))
    output_path = os.path.join(directory, 'output.xlsx')
    # Read the inventory and sales sheets.
    inventory_df = pd.read_excel(file_path, sheet_name='库存')
    inventory_df['总计库存'] = inventory_df['可用库存'].fillna(0).astype(int) + inventory_df['在途库存'].fillna(0).astype(int)
    sales_df = pd.read_excel(file_path, sheet_name='销售')
    # Convert the '核销日期' column to datetime.
    sales_df['核销日期'] = pd.to_datetime(sales_df['核销日期'])
    # Today's date
    today = datetime.today()
    # Start and end of last week (Monday through Sunday)
    last_week_start = today - timedelta(days=today.weekday() + 7)
    last_week_end = last_week_start + timedelta(days=6)
    # Start and end of the previous two weeks (a two-week window ending on last week's Sunday)
    two_weeks_ago_start = today - timedelta(days=today.weekday() + 14)
    two_weeks_ago_end = last_week_start + timedelta(days=6)
    # Filter the sales rows that fall into each window.
    last_week_sales = sales_df[(sales_df['核销日期'] >= last_week_start) & (sales_df['核销日期'] <= last_week_end)]
    two_weeks_ago_sales = sales_df[(sales_df['核销日期'] >= two_weeks_ago_start) & (sales_df['核销日期'] <= two_weeks_ago_end)]
    # Group by 货号 / 颜色编号 / 尺码 and count the sales rows.
    last_week_sales_grouped = last_week_sales.groupby(['货号', '颜色编号', '尺码']).size().reset_index(name='上周销')
    two_weeks_ago_sales_grouped = two_weeks_ago_sales.groupby(['货号', '颜色编号', '尺码']).size().reset_index(name='上两周销')
    print(last_week_sales_grouped)
    print(two_weeks_ago_sales_grouped)
    # Merge the two sales summaries.
    sales_summary = pd.merge(last_week_sales_grouped, two_weeks_ago_sales_grouped, on=['货号', '颜色编号', '尺码'], how='outer')
    # Group the inventory by 货号 / 颜色编号 / 尺码, keeping the first row of each group.
    inventory_summary = inventory_df.groupby(['货号', '颜色编号', '尺码']).first().reset_index()
    # Merge inventory with sales.
    final_df = pd.merge(inventory_summary, sales_summary, on=['货号', '颜色编号', '尺码'], how='outer')
    # Build the SKC key.
    final_df['SKC'] = final_df['货号'] + final_df['颜色编号']
    print(final_df.columns)
    # Build the pivot table (sizes as columns).
    pivot_df = pd.pivot_table(
        final_df,
        index=['年份', '季节', '类别', 'SKC', '渠道编号', '渠道简称'],
        columns='尺码',
        values=['上周销', '上两周销', '总计库存'],
        aggfunc='sum',
        fill_value=None
    )
    # Totals per SKC across all sizes
    skc_totals = final_df.groupby(['年份', '季节', '类别', 'SKC', '渠道编号', '渠道简称']).agg({
        '上周销': 'sum',
        '上两周销': 'sum',
        '总计库存': 'sum'
    })
    # Append the totals columns to the pivot table.
    pivot_df[('上周销', '总和')] = skc_totals['上周销']
    pivot_df[('上两周销', '总和')] = skc_totals['上两周销']
    pivot_df[('总计库存', '总和')] = skc_totals['总计库存']
    # Reset the index before saving.
    pivot_df = pivot_df.reset_index()
    # Write the results to a new Excel file.
    with pd.ExcelWriter(output_path) as writer:
        pivot_df.to_excel(writer, sheet_name='透视表')
        final_df.to_excel(writer, sheet_name='汇总表')
    return f"报表生成成功!{output_path}"
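

# Note on inputs: allocate reads the '库存' and '销售' sheets; '库存' needs the
# 可用库存 / 在途库存 columns and '销售' needs 核销日期, with the shared
# 货号 / 颜色编号 / 尺码 keys used for grouping and merging.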


# Check whether the user's request resolves to one of the registered tools.
async def validate_use_tools(prompt):
    messages = [{'role': 'user', 'content': prompt}]
    response = await fnc_chatbot.a_generate_reply(messages=messages)
    print(f'tool_response******{response}')
    if isinstance(response, str):
        return False
    if response['tool_calls']:
        return True
    else:
        return False


# Run the user's request through the agents and collect the generated report paths.
def generate_result(prompt):
    msg = fnc_user_proxy.initiate_chat(fnc_chatbot, message=prompt, max_turns=4)
    result = []
    for item in msg.chat_history:
        if item.get('role') == 'tool':
            result.append(item.get('content'))
    final_result = [x.split('!')[1] for x in result if '报表生成成功!' in x]
    if final_result:
        return '报表生成成功', final_result
    return '报表生成失败', None
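

# generate_result scans the chat history for tool messages and keeps the segment
# right after the full-width '!' in '报表生成成功!<path>', so the success prefix
# emitted by the tools above doubles as the marker for extracting file paths.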


if __name__ == '__main__':
    # x = generate_presale_shipping(['/tokenicin/xkwas/wwsad/xsasd/data-analyze/test/工厂预售发货跟进报表_test.xlsx'])
    # x = generate_result('帮我做一张调拨小表' + '\n file_path: /tokenicin/xkwas/wwsad/xsasd/data-analyze/data/销售与库存12.25.xlsx')
    # print(x)
    x = process_inventory_data(filepaths=['/tokenicin/xkwas/wwsad/xsasd/data-analyze/data/预售欠发上传数据2.7.xlsx'])
    print(x)
    # x = validate_use_tools('帮我做一张预售欠发货表' + '\n file_path: /workspace/wangdalin/upload/工厂预售发货跟进报表_test.xlsx')
    # print(x)