from PIL import Image
import requests
from transformers import Blip2Processor, Blip2ForConditionalGeneration
import torch
from peft import LoraConfig, get_peft_model, PeftModel

# LoRA hyperparameters. These are only required when creating a new adapter with
# get_peft_model for training; the saved adapter loaded below ships its own config.
config = LoraConfig(
    r=16,               # rank of the low-rank update matrices
    lora_alpha=32,      # scaling factor applied to the LoRA update
    lora_dropout=0.05,  # dropout on the LoRA layers
    bias="none",        # leave bias terms untouched
)

device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "./blip2_fakenews_all"

# device_map = {"": 0}
device_map = "auto"
# Load the processor and the 8-bit quantized base BLIP-2 model from the local "blip2" directory.
processor = Blip2Processor.from_pretrained("blip2")
model = Blip2ForConditionalGeneration.from_pretrained("blip2",
                                                      load_in_8bit=True,
                                                      device_map=device_map)
# Attach the fine-tuned LoRA adapter to the quantized base model for inference.
model = PeftModel.from_pretrained(model, model_name)
# model = get_peft_model(model, config)  # not needed here: it would stack a fresh,
#                                        # untrained adapter on top of the loaded one


url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

prompt = "Question: Is this real or fake? Answer: real. Question: Why? "
inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16)

# Generate the model's explanation and decode it back to text.
generated_ids = model.generate(**inputs)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
print(generated_text)
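
# Optional variation (a sketch, not from the original script): the call above relies on
# the model's default generation length, which can truncate the explanation. Standard
# generate() kwargs such as max_new_tokens or num_beams can be passed through, e.g.:
#
# generated_ids = model.generate(**inputs, max_new_tokens=50, num_beams=5)
# print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())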