ved1beta committed
Commit · fa73fe7
Parent(s): cb872ce
appready
app.py CHANGED
@@ -1,115 +1,93 @@
import subprocess
-
-# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

import gradio as gr
from PIL import Image
from transformers import AutoModelForCausalLM
from transformers import AutoProcessor
from transformers import TextIteratorStreamer
-import time
from threading import Thread
import torch
import spaces

-model_id = "microsoft/Phi-3-vision-128k-instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cpu",
    trust_remote_code=True,
-    torch_dtype=torch.
-    …
)
-processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

PLACEHOLDER = """
-<div style="padding: 30px; text-align: center;
-<
-<
-<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Phi-3-Vision is a 4.2B parameter multimodal model that brings together language and vision capabilities.</p>
</div>
"""

@spaces.CPU
def bot_streaming(message, history):
-    print(f'message is - {message}')
-    print(f'history is - {history}')
-    if message["files"]:
-        if type(message["files"][-1]) == dict:
-            image = message["files"][-1]["path"]
-        else:
-            image = message["files"][-1]
-    else:
-        for hist in history:
-            if type(hist[0]) == tuple:
-                image = hist[0][0]
    try:
-        …

-    …
-            conversation[0]['content'] = f"<|image_1|>\n{user}"
-            conversation.extend([{"role": "assistant", "content": assistant}])
-            flag=False
-            continue
-        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-
-    if len(history) == 0:
        conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
-    …
-    inputs = processor(prompt, image, return_tensors="pt")

-    …

-    …

-    …
-        yield buffer

-    …
-with gr.Blocks(fill_height=True, ) as demo:
    gr.ChatInterface(
-        …
-        {"text": "I want to find a seat close to windows, where can I sit?", "files": ["./office1.jpg"]},
-        ],
-        description="Try the [Phi3-Vision model](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) from Microsoft. Upload an image and start chatting about it, or simply try one of the examples below. If you won't upload an image, you will receive an error. This is not the official demo.",
-        stop_btn="Stop Generation",
-        multimodal=True,
-        textbox=chat_input,
-        chatbot=chatbot,
-        cache_examples=False,
-        examples_per_page=3
    )

-demo.queue()
-demo.launch(debug=
import subprocess
+subprocess.run('pip install bitsandbytes', shell=True)

import gradio as gr
from PIL import Image
from transformers import AutoModelForCausalLM
from transformers import AutoProcessor
from transformers import TextIteratorStreamer
from threading import Thread
import torch
import spaces

+model_id = "microsoft/Phi-3-vision-128k-instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cpu",
    trust_remote_code=True,
+    torch_dtype=torch.float16,  # Reduced precision
+    load_in_8bit=True,  # 8-bit quantization
+    _attn_implementation="eager"
)
+processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

PLACEHOLDER = """
+<div style="padding: 30px; text-align: center;">
+<h1>Phi3 Vision Model</h1>
+<p>Upload an image and ask a question</p>
</div>
"""

@spaces.CPU
def bot_streaming(message, history):
    try:
+        # Image extraction
+        image = (message["files"][-1]["path"] if isinstance(message["files"][-1], dict) else message["files"][-1]) if message["files"] else None
+
+        if not image:
+            raise ValueError("No image uploaded")

+        # Conversation preparation
+        conversation = []
+        for user, assistant in history:
+            conversation.extend([
+                {"role": "user", "content": user},
+                {"role": "assistant", "content": assistant or ""}
+            ])
+
        conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
+
+        # Prompt and image processing
+        prompt = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
+        image = Image.open(image)
+        inputs = processor(prompt, image, return_tensors="pt")

+        # Streaming generation with reduced tokens
+        streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
+        generation_kwargs = dict(
+            inputs,
+            streamer=streamer,
+            max_new_tokens=256,  # Reduced token generation
+            do_sample=False,
+            temperature=0.1,
+            eos_token_id=processor.tokenizer.eos_token_id
+        )
+
+        thread = Thread(target=model.generate, kwargs=generation_kwargs)
+        thread.start()
+
+        buffer = ""
+        for new_text in streamer:
+            buffer += new_text
+            yield buffer

+    except Exception as e:
+        yield f"Error: {str(e)}"

+# Gradio Interface Configuration
+chatbot = gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
+chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Upload image and ask a question")

+demo = gr.Blocks()
+with demo:
    gr.ChatInterface(
+        fn=bot_streaming,
+        title="Phi3 Vision 128K",
+        description="Multimodal AI Vision Model",
+        multimodal=True,
+        textbox=chat_input,
+        chatbot=chatbot
    )

+demo.queue(max_size=10)  # Limit queue size
+demo.launch(debug=False, show_error=True)
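One caveat on the new loading arguments: recent transformers releases deprecate passing load_in_8bit=True directly to from_pretrained in favor of a BitsAndBytesConfig, and bitsandbytes 8-bit quantization generally expects a CUDA device, which sits uneasily with the device_map="cpu" in this commit. A minimal sketch of the newer style, assuming a GPU-backed environment with current transformers and bitsandbytes (not part of this commit):

# Sketch only: assumes a CUDA GPU and recent transformers + bitsandbytes.
# The committed code passes load_in_8bit=True directly; newer versions
# route the same option through BitsAndBytesConfig instead.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_id = "microsoft/Phi-3-vision-128k-instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # let accelerate place layers on the available GPU
    trust_remote_code=True,
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    _attn_implementation="eager",
)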
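The @spaces.CPU decorator is also worth double-checking: the Hugging Face spaces helper package documents @spaces.GPU for ZeroGPU hardware, and if spaces.CPU is not available in the installed version, the decorator can simply be dropped on a plain CPU Space. A hedged sketch:

# Sketch: @spaces.GPU is the documented decorator for ZeroGPU Spaces;
# on CPU-only hardware the handler can be left undecorated.
import spaces

@spaces.GPU  # assumption: running on ZeroGPU; omit on CPU hardware
def bot_streaming(message, history):
    ...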
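Since bot_streaming is a plain generator, it can also be exercised outside Gradio. The message dict below mirrors the {"text": ..., "files": [...]} structure that gr.MultimodalTextbox submits; the image path is illustrative, not a file known to ship with the Space:

# Illustrative smoke test; "office1.jpg" is a placeholder path.
# Each yielded value is the cumulative assistant reply so far.
message = {"text": "Describe this image.", "files": ["office1.jpg"]}
for partial in bot_streaming(message, history=[]):
    print(partial)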