Spaces: Running on Zero
Update app.py
app.py CHANGED
```diff
@@ -1,4 +1,3 @@
-import time
 from threading import Thread
 from llava_llama3.serve.cli import chat_llava
 from llava_llama3.model.builder import load_pretrained_model
@@ -8,6 +7,7 @@ from PIL import Image
 import argparse
 import spaces
 import os
+import time
 
 root_path = os.path.dirname(os.path.abspath(__file__))
 print(root_path)
```
```diff
@@ -29,7 +29,8 @@ tokenizer, llava_model, image_processor, context_len = load_pretrained_model(
     'llava_llama3',
     args.load_8bit,
     args.load_4bit,
-    device=args.device
+    device=args.device
+)
 
 @spaces.GPU
 def bot_streaming(message, history):
```
```diff
@@ -53,8 +54,9 @@ def bot_streaming(message, history):
     if image is None:
         raise gr.Error("You need to upload an image for LLaVA to work.")
 
-    # Load the image
-
+    # Load the image if it's a path, otherwise use the existing PIL image
+    if isinstance(image, str):
+        image = Image.open(image).convert('RGB')
 
     # Generate the prompt for the model
     prompt = message['text']
```
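The `isinstance` guard added here matters because the handler can receive the image either as a file path (how `gr.MultimodalTextbox` delivers uploads) or as an already-loaded `PIL.Image`, as the `image_file = image if isinstance(image, str) else image.filename` line removed in the next hunk suggests. A minimal, self-contained sketch of that normalization (the function name is just for illustration):

```python
from PIL import Image

def normalize_image(image):
    # Fresh uploads arrive as path strings; images from earlier turns
    # may already be PIL objects, so only open when given a path.
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")
    return image
```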
```diff
@@ -62,8 +64,6 @@ def bot_streaming(message, history):
     # Use a streamer to generate the output in a streaming fashion
     streamer = []
 
-    image_file = image if isinstance(image, str) else image.filename
-
     # Define a function to call chat_llava in a separate thread
     def generate_output():
        output = chat_llava(
```
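The context lines above show the streaming setup this commit leaves in place: `chat_llava` runs in a worker thread (`generate_output`), tokens accumulate in the shared `streamer` list, and the handler yields a growing buffer back to Gradio. A self-contained sketch of that producer/consumer pattern, with a dummy `generate_tokens` standing in for `chat_llava`, whose full call isn't visible in this diff:

```python
import time
from threading import Thread

def generate_tokens(prompt):
    # Stand-in for the real model call (chat_llava in this Space).
    for word in "a reply streamed one word at a time".split():
        time.sleep(0.1)
        yield word + " "

def stream_reply(prompt):
    streamer = []      # shared buffer the worker thread appends to
    done = object()    # sentinel marking the end of generation

    def generate_output():
        for token in generate_tokens(prompt):
            streamer.append(token)
        streamer.append(done)

    Thread(target=generate_output).start()

    buffer = ""
    while True:
        while streamer:
            token = streamer.pop(0)
            if token is done:
                return
            buffer += token
            yield buffer   # each partial buffer becomes one UI update
        time.sleep(0.05)   # avoid busy-waiting between tokens

for partial in stream_reply("hi"):
    print(partial)
```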
```diff
@@ -98,20 +98,19 @@ def bot_streaming(message, history):
             yield buffer
 
 
-chatbot=gr.Chatbot(scale=1)
+chatbot = gr.Chatbot(scale=1)
 chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
-with gr.Blocks(fill_height=True
+with gr.Blocks(fill_height=True) as demo:
     gr.ChatInterface(
-
-
-
-
-
-
-
-
-        chatbot=chatbot,
+        fn=bot_streaming,
+        title="FinLLaVA",
+        examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
+                  {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
+        stop_btn="Stop Generation",
+        multimodal=True,
+        textbox=chat_input,
+        chatbot=chatbot,
     )
 
 demo.queue(api_open=False)
-demo.launch(show_api=False, share=False)
+demo.launch(show_api=False, share=False)
```
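The restored block wires `bot_streaming` into a multimodal `gr.ChatInterface` inside a `gr.Blocks` layout. A trimmed, runnable version of the same wiring, using an echo handler in place of `bot_streaming` and omitting the `examples` entries since `./bee.jpg` and `./baklava.png` may not exist locally:

```python
import gradio as gr

def echo_streaming(message, history):
    # Stand-in for bot_streaming: stream the typed text back word by word.
    buffer = ""
    for word in message["text"].split():
        buffer += word + " "
        yield buffer

chatbot = gr.Chatbot(scale=1)
chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"],
                                  placeholder="Enter message or upload file...",
                                  show_label=False)

with gr.Blocks(fill_height=True) as demo:
    gr.ChatInterface(
        fn=echo_streaming,
        title="FinLLaVA",
        stop_btn="Stop Generation",
        multimodal=True,
        textbox=chat_input,
        chatbot=chatbot,
    )

demo.queue(api_open=False)
demo.launch(show_api=False, share=False)
```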