| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196 |
import base64
import io
import os
import time

import numpy as np  # used by image_reader (np.ndarray branch); was missing
import tos  # provides tos.exceptions referenced in upload_tos; was missing
from openai import OpenAI
from PIL import Image
from tos import HttpMethodType

from conf import *
def image_to_base64(image, fmt="PNG"):
    """Encode an in-memory image as a base64 data URI.

    Args:
        image: object with a PIL-style ``save(fp, format=...)`` method.
        fmt: image format handed to ``save`` (default ``"PNG"``); the MIME
            subtype of the returned URI is derived from it, so it must be
            one the consumer accepts (png / jpeg / webp).

    Returns:
        str: ``data:image/<fmt>;base64,<payload>``.
    """
    buffer = io.BytesIO()
    image.save(buffer, format=fmt)
    # getvalue() returns the full buffer contents; no explicit seek(0) needed.
    payload = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/{fmt.lower()};base64,{payload}"
def image_reader(image):
    """Normalize an image input into something the chat API accepts.

    A string starting with "http" is treated as a URL and returned
    unchanged; any other string is read as a local file path. Numpy
    arrays and PIL images are used directly. Non-URL inputs are
    converted to RGB and returned as a base64 data URI.
    """
    if isinstance(image, str):
        # Remote URLs are passed through untouched for the downstream API.
        if image.startswith("http"):
            return image
        pil_img = Image.open(image)
    elif isinstance(image, np.ndarray):
        pil_img = Image.fromarray(image)
    else:
        # Assumed to already be a PIL image (or compatible object).
        pil_img = image
    return image_to_base64(pil_img.convert('RGB'))
def get_lm_text(sys_prompt, user_prompt):
    """Run a text-only chat completion and return the reply content.

    NOTE(review): relies on LMConfig (star-imported from conf) exposing an
    OpenAI-style client (``lm_client``) and a model name — confirm in conf.py.
    """
    chat_messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": user_prompt},
    ]
    response = LMConfig.lm_client.chat.completions.create(
        messages=chat_messages,
        model=LMConfig.model,
    )
    return response.choices[0].message.content
## Multimodal input
def compress_image(input_path, output_path):
    """Compress an image file until it is under MMMConfig.target_size bytes.

    The initial quality is a rough estimate from the ratio of target size to
    current file size; it is then lowered in steps of 10, re-compressing the
    written file, until the output is small enough or quality bottoms out.

    Args:
        input_path: path of the source image.
        output_path: path where the compressed image is written.

    Returns:
        int: the last quality value used (can drop to <= 0 if the loop
        gave up before reaching the target size).
    """
    img = Image.open(input_path)
    current_size = os.path.getsize(input_path)
    # Rough first guess, computed once. Clamp to [1, 95]: the original could
    # exceed 100 when the file is already under target, which some PIL
    # encoders reject, and quality > 95 disables JPEG size optimizations.
    image_quality = int(MMMConfig.target_size / current_size * 100)
    image_quality = max(1, min(image_quality, 95))
    img.save(output_path, optimize=True, quality=image_quality)
    # Keep re-compressing the written file with decreasing quality until it
    # fits. NOTE: each pass re-encodes already-compressed data (lossy).
    while os.path.getsize(output_path) > MMMConfig.target_size:
        img = Image.open(output_path)
        image_quality -= 10
        if image_quality <= 0:
            break
        img.save(output_path, optimize=True, quality=image_quality)
    return image_quality
def upload_tos(filename, tos_object_key):
    """Upload a local file to the TOS bucket and return a pre-signed GET URL.

    Args:
        filename: full local path of the (compressed) image to upload.
        tos_object_key: object key under which to store the file.

    Returns:
        The pre-signed URL result from the inner TOS client.

    Raises:
        tos.exceptions.TosClientError / TosServerError (re-raised after
        logging), or any other exception from the upload.
    """
    tos_client, inner_tos_client = MMMConfig.tos_client, MMMConfig.inner_tos_client
    try:
        # Upload the local file to the target bucket.
        tos_client.put_object_from_file(MMMConfig.tos_bucket_name, tos_object_key, filename)
        # Fetch a pre-signed URL for the uploaded object.
        return inner_tos_client.pre_signed_url(
            HttpMethodType.Http_Method_Get, MMMConfig.tos_bucket_name, tos_object_key
        )
    except tos.exceptions.TosClientError as e:
        # Client-side failure: usually invalid request parameters or a network issue.
        print('fail with client error, message:{}, cause: {}'.format(e.message, e.cause))
        raise
    except tos.exceptions.TosServerError as e:
        # Server-side failure: detailed error info is available on the exception.
        print('fail with server error, code: {}'.format(e.code))
        # The request id pinpoints the failing request — keep it in the logs.
        print('error with request id: {}'.format(e.request_id))
        print('error with message: {}'.format(e.message))
        print('error with http code: {}'.format(e.status_code))
        raise
    except Exception as e:
        print('fail with unknown error: {}'.format(e))
        raise
def doubao_MMM_request(pre_signed_url_output, prompt):
    """Send a text+image chat request to the configured multimodal model.

    Args:
        pre_signed_url_output: object exposing a ``signed_url`` attribute
            (the pre-signed TOS image URL).
        prompt: text instruction to pair with the image.

    Returns:
        str: the model's reply text.
    """
    text_part = {"type": "text", "text": prompt}
    image_part = {
        "type": "image_url",
        "image_url": {"url": pre_signed_url_output.signed_url},
    }
    response = MMMConfig.client.chat.completions.create(
        model=MMMConfig.model,
        messages=[{"role": "user", "content": [text_part, image_part]}],
        temperature=0.8,
        extra_headers={"x-ark-beta-vision": "true"},
    )
    return response.choices[0].message.content
class llm_request:
    """Thin wrapper around an OpenAI-compatible chat endpoint.

    Holds the (api_key, base_url, model) triple and exposes one text-only
    and one text+image request method. Class name kept lowercase for
    backward compatibility with existing callers.
    """

    def __init__(self, api_key, base_url, model) -> None:
        # If no environment variable is configured, pass a literal key
        # here instead, e.g. api_key="sk-xxx".
        self.api_key = api_key
        self.base_url = base_url
        self.model = model

    def _make_client(self):
        """Build an OpenAI client for this endpoint (shared by both request methods)."""
        return OpenAI(api_key=self.api_key, base_url=self.base_url)

    def llm_mm_request(self, usr_text, img, sys_text="You are a helpful assistant."):
        """Multimodal request: image + user text; returns the reply text.

        ``img`` may be a URL, a local path, a numpy array, or a PIL image —
        anything image_reader accepts.
        """
        client = self._make_client()
        completion = client.chat.completions.create(
            model=self.model,
            messages=[
                {
                    "role": "system",
                    "content": [{"type": "text", "text": sys_text}],
                },
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            # When sending base64, the image format in
                            # data:image/{format} must match a supported
                            # Content-Type:
                            #   PNG:  f"data:image/png;base64,{base64_image}"
                            #   JPEG: f"data:image/jpeg;base64,{base64_image}"
                            #   WEBP: f"data:image/webp;base64,{base64_image}"
                            "image_url": {"url": image_reader(img)},
                        },
                        {"type": "text", "text": usr_text},
                    ],
                },
            ],
            temperature=1.5,
            top_p=0.85,
            presence_penalty=1.5,
            frequency_penalty=1.5,
        )
        return completion.choices[0].message.content

    def llm_text_request(self, text, sys_text="You are a helpful assistant."):
        """Text-only request; returns the reply text."""
        client = self._make_client()
        completion = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": sys_text},
                {"role": "user", "content": text},
            ],
            temperature=0.9,
        )
        return completion.choices[0].message.content
-
if __name__ == "__main__":
    # SECURITY: never hard-code API keys in source — the literal keys that
    # were committed here should be revoked. Read them from the environment.
    ## Aliyun Bailian (DashScope) endpoint
    ky = os.environ.get("DASHSCOPE_API_KEY", "")
    baseurl = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    model = "qwen-vl-max-latest"
    ## Doubao (Volc Ark) endpoint — set ARK_API_KEY and swap these in:
    # ky = os.environ.get("ARK_API_KEY", "")
    # baseurl = "https://ark.cn-beijing.volces.com/api/v3"
    # model = "doubao-1-5-vision-pro-32k-250115"
    llm = llm_request(ky, baseurl, model)
    res1 = llm.llm_mm_request(
        "描述一下图片中的衣服",
        "/data/data/Mia/product_env_project/gen_sellpoint/企业微信截图_17372766091671.png",
    )
    print(res1)
    res2 = llm.llm_text_request("你好!你是谁")
    print(res2)
|