import os
os.environ['HF_ENDPOINT'] = "https://hf-mirror.com"  # route Hugging Face Hub downloads through the hf-mirror.com mirror
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, AutoConfig
from qwen_vl_utils import process_vision_info
from quantization import apply_AWQ
import torch
# Load the base model
model_path="/root/autodl-tmp/Model/hub/Qwen/Qwen2___5-VL-7B-Instruct"
model_name="Qwen/Qwen2.5-VL-7B-Instruct"
config=AutoConfig.from_pretrained(model_path)
import subprocess
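# get_nvidia_smi_output() is called below but was not defined in this upload.
# A minimal sketch, assuming `nvidia-smi` is available on PATH:
def get_nvidia_smi_output():
    """Return the raw `nvidia-smi` output as a string (GPU memory snapshot)."""
    return subprocess.run(
        ["nvidia-smi"], capture_output=True, text=True, check=True
    ).stdout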
# Although the config specifies bfloat16, the model still initializes in float32, so building the model on the CPU takes a few minutes.
model = Qwen2_5_VLForConditionalGeneration(config)
model.to(torch.bfloat16)
# Get the output of nvidia-smi
output = get_nvidia_smi_output()
print(output)
processor = AutoProcessor.from_pretrained(model_path)
# Apply AWQ quantization
quantization_config_path="./weights/AWQ_config.json"
quantization_weight_path="./weights/AWQ_weights.pth"
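# apply_AWQ comes from the local `quantization` module bundled with this upload;
# presumably it swaps the model's linear layers for AWQ-quantized ones, using the
# config and weights loaded from the files above.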
res=apply_AWQ(model,quantization_config_path,quantization_weight_path=quantization_weight_path)
# Get the output of nvidia-smi
output = get_nvidia_smi_output()
print(output)
# Inference
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "What does this photo show?"},
        ],
    }
]
# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
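# process_vision_info (from qwen_vl_utils) fetches the image/video inputs
# referenced in the messages; here it downloads the demo image from the URL above.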
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
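# Move to the GPU only after quantization, so only the quantized model is placed there.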
device = "cuda"
inputs = inputs.to(device)
model.to(device)
model.eval()
# Get the output of nvidia-smi
output = get_nvidia_smi_output()
print(output)
# Inference: Generation of the output
with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=128)
# Strip the prompt tokens from each sequence so only the newly generated text is decoded
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)