# Standard-library imports.
import base64
import io
import logging
import os
import sys
import time

# Configure logging FIRST, before importing the rest of the application,
# so that modules imported below see an already-configured logging system.
# logger_config auto-configures the log system on import (if present).
try:
    import logger_config
except ImportError:
    # Optional module: fall back to the manual handler setup below.
    pass

# Module logger (name is "llm" via __name__); configured explicitly so log
# output survives environments (e.g. gunicorn workers) that reset the root
# logger's handlers.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Remove any pre-existing handlers (handles gunicorn worker restarts), then
# attach a dedicated stderr handler so this logger emits directly, without
# relying on propagation to the root logger.
for h in logger.handlers[:]:
    logger.removeHandler(h)
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
logger.propagate = False  # emit via our own handler only; do not propagate

# Third-party / project imports (after logging is configured).
from PIL import Image
import numpy as np
from openai import OpenAI
from conf import *
import tos  # bug fix: tos.exceptions.* is referenced in upload_tos() below
from tos import HttpMethodType
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def image_to_base64(image):
    """Encode a PIL Image as a base64 string of its JPEG bytes (quality 95)."""
    buffer = io.BytesIO()
    # Serialize the image into the in-memory buffer as JPEG.
    image.save(buffer, format='JPEG', quality=95)
    # getvalue() returns the complete buffer contents regardless of the
    # current stream position, so no seek(0) is needed.
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
def download_image_with_retry(url, max_retries=3, timeout=30):
    """Download an image over HTTP(S) with automatic retries.

    Retries up to max_retries times (with backoff) on 429/5xx responses.
    Returns a PIL.Image on success, or None on any failure.
    """
    retry_policy = Retry(
        total=max_retries,
        backoff_factor=1,
        status_forcelist=[429, 500, 502, 503, 504],
    )
    session = requests.Session()
    # Install the retrying adapter for both plain and TLS transports.
    for scheme in ("http://", "https://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))

    try:
        logger.info(f"正在下载图片: {url}")
        resp = session.get(url, timeout=timeout)
        resp.raise_for_status()
        logger.info("图片下载成功")
        # Decode straight from the in-memory response body.
        return Image.open(io.BytesIO(resp.content))
    except Exception as exc:
        # Best-effort download: callers must handle a None result.
        logger.error(f"下载图片失败: {exc}")
        return None
def image_reader(image):
    """Normalize any supported image input into a base64 JPEG data URI.

    Accepts an http(s) URL, a local file path, a numpy ndarray, or an
    object assumed to be a PIL.Image. The result is the full
    "data:image/jpeg;base64,..." string expected by vision endpoints.
    """
    if isinstance(image, str):
        if image.startswith("http"):
            # Remote URL: fetch it into memory first.
            pil_img = download_image_with_retry(image)
            if pil_img is None:
                # Download failed after retries; surface it to the caller.
                raise Exception(f"无法下载图片: {image}")
        else:
            # Treat any other string as a local file path.
            pil_img = Image.open(image)
    elif isinstance(image, np.ndarray):
        pil_img = Image.fromarray(image)
    else:
        # Assume the input is already a PIL.Image (or compatible object).
        pil_img = image
    # JPEG has no alpha channel, so force RGB before encoding.
    pil_img = pil_img.convert('RGB')
    return f"data:image/jpeg;base64,{image_to_base64(pil_img)}"
def get_lm_text(sys_prompt, user_prompt):
    """Run a plain-text chat completion and return the model's reply text."""
    conversation = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": user_prompt},
    ]
    response = LMConfig.lm_client.chat.completions.create(
        messages=conversation,
        model=LMConfig.model,
    )
    return response.choices[0].message.content
- ## 多模态的输入
- def compress_image(input_path, output_path):
- img = Image.open(input_path)
- current_size = os.path.getsize(input_path)
- # 粗略的估计压缩质量,也可以从常量开始,逐步减小压缩质量,直到文件大小小于目标大小
- image_quality = int(float(MMMConfig.target_size / current_size) * 100)
- img.save(output_path, optimize=True, quality=int(float(MMMConfig.target_size / current_size) * 100))
- # 如果压缩后文件大小仍然大于目标大小,则继续压缩
- # 压缩质量递减,直到文件大小小于目标大小
- while os.path.getsize(output_path) > MMMConfig.target_size:
- img = Image.open(output_path)
- image_quality -= 10
- if image_quality <= 0:
- break
- img.save(output_path, optimize=True, quality=image_quality)
- return image_quality
def upload_tos(filename, tos_object_key):
    """Upload a local file to TOS and return a pre-signed GET URL for it.

    filename: full local path of the (compressed) image to upload.
    tos_object_key: destination object key inside MMMConfig.tos_bucket_name.

    TOS client/server errors are logged and swallowed (returns None);
    any other exception is logged and re-raised.
    """
    # Bug fix: the module only did `from tos import HttpMethodType`, so the
    # `tos.exceptions.*` lookups below raised NameError instead of logging
    # the real upload error. Import the package locally so this function is
    # self-contained.
    import tos

    tos_client, inner_tos_client = MMMConfig.tos_client, MMMConfig.inner_tos_client
    try:
        # Upload the local file into the target bucket.
        tos_client.put_object_from_file(MMMConfig.tos_bucket_name, tos_object_key, filename)
        # Return a pre-signed GET url for the freshly uploaded object.
        return inner_tos_client.pre_signed_url(HttpMethodType.Http_Method_Get, MMMConfig.tos_bucket_name, tos_object_key)
    except tos.exceptions.TosClientError as e:
        # Client-side failure: usually bad request parameters or a network error.
        logger.error('TOS客户端错误, message:{}, cause: {}'.format(e.message, e.cause))
    except tos.exceptions.TosServerError as e:
        # Server-side failure: detailed diagnostics available on the exception.
        logger.error('TOS服务端错误, code: {}'.format(e.code))
        # The request id pinpoints the failing request — keep it in the logs.
        logger.error('error with request id: {}'.format(e.request_id))
        logger.error('error with message: {}'.format(e.message))
        logger.error('error with http code: {}'.format(e.status_code))
    except Exception as e:
        logger.error('TOS上传失败,未知错误: {}'.format(e))
        raise
def doubao_MMM_request(pre_signed_url_output, prompt):
    """Send a multimodal (text + image URL) request to the configured Doubao
    model and return the text of the first reply choice."""
    user_content = [
        {"type": "text", "text": prompt},
        # The image is referenced by its pre-signed TOS URL.
        {"type": "image_url", "image_url": {"url": pre_signed_url_output.signed_url}},
    ]
    response = MMMConfig.client.chat.completions.create(
        model=MMMConfig.model,
        messages=[{"role": "user", "content": user_content}],
        temperature=0.8,
        # Opt in to the Ark vision beta endpoint.
        extra_headers={"x-ark-beta-vision": "true"},
    )
    return response.choices[0].message.content
class llm_request:
    """Thin wrapper around an OpenAI-compatible endpoint, supporting both
    plain-text and multimodal (image + text) chat completions."""

    def __init__(self, api_key, base_url, model) -> None:
        # Credentials and endpoint of the OpenAI-compatible service.
        self.api_key = api_key
        self.base_url = base_url
        self.model = model

    def _make_client(self):
        # Build a fresh client per request; keeps the wrapper stateless.
        # (If no env var is configured, api_key can be a literal "sk-xxx".)
        return OpenAI(api_key=self.api_key, base_url=self.base_url)

    def llm_mm_request(self, usr_text, img, sys_text="You are a helpful assistant."):
        """Multimodal request: send one image plus user text, return the reply.

        img may be a URL, a local path, an ndarray, or a PIL.Image; it is
        converted by image_reader() into a base64 data URI. Note the data
        URI's media type (image/{format}) must match a supported content
        type: data:image/png;..., data:image/jpeg;..., or data:image/webp;...
        """
        client = self._make_client()
        system_msg = {
            "role": "system",
            "content": [{"type": "text", "text": sys_text}],
        }
        user_msg = {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_reader(img)}},
                {"type": "text", "text": usr_text},
            ],
        }
        response = client.chat.completions.create(
            model=self.model,
            messages=[system_msg, user_msg],
            temperature=1.5,
            top_p=0.85,
            presence_penalty=1.5,
            frequency_penalty=1.5,
            timeout=120.0,
        )
        return response.choices[0].message.content

    def llm_text_request(self, text, sys_text="You are a helpful assistant."):
        """Text-only request: send user text and return the model's reply."""
        client = self._make_client()
        response = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": sys_text},
                {"role": "user", "content": text},
            ],
            temperature=0.9,
            timeout=120.0,
        )
        return response.choices[0].message.content
-
if __name__ == "__main__":
    # --- Ali (DashScope, OpenAI-compatible endpoint) ---
    # SECURITY: never hard-code API keys in source control. Read the key
    # from the environment; the previously committed key must be revoked.
    ky = os.environ.get("DASHSCOPE_API_KEY", "")
    baseurl = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    model = "qwen-vl-max-latest"
    # --- Doubao (Ark) alternative ---
    # ky = os.environ.get("ARK_API_KEY", "")
    # baseurl = "https://ark.cn-beijing.volces.com/api/v3"
    # model = "doubao-1-5-vision-pro-32k-250115"
    llm = llm_request(ky, baseurl, model)
    res1 = llm.llm_mm_request("描述一下图片中的衣服", "/data/data/Mia/product_env_project/gen_sellpoint/企业微信截图_17372766091671.png")
    print(res1)
    res2 = llm.llm_text_request("你好!你是谁")
    print(res2)