Update app.py
app.py CHANGED

@@ -47,18 +47,13 @@ ocr_model = ocr_predictor(
 
 
 if torch.cuda.is_available():
-    # Load the processor (no need to move to GPU)
     processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
-
-    # Load the model and move it to GPU
     vision_model = LlavaNextForConditionalGeneration.from_pretrained(
         "llava-hf/llava-v1.6-mistral-7b-hf",
         torch_dtype=torch.float16,
         low_cpu_mem_usage=True,
         load_in_4bit=True,
-    )
-
-
+    )
 
 
 @spaces.GPU
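
For context, here is a minimal standalone sketch of the loading pattern this commit leaves in place. It assumes transformers >= 4.39 (LLaVA-NeXT support) and bitsandbytes are installed; MODEL_ID is just an illustrative constant. Note that newer transformers releases prefer requesting 4-bit loading through a BitsAndBytesConfig rather than the bare load_in_4bit kwarg shown in the diff above.

import torch
from transformers import (
    BitsAndBytesConfig,
    LlavaNextForConditionalGeneration,
    LlavaNextProcessor,
)

MODEL_ID = "llava-hf/llava-v1.6-mistral-7b-hf"

if torch.cuda.is_available():
    # The processor only does CPU-side preprocessing; it never needs the GPU.
    processor = LlavaNextProcessor.from_pretrained(MODEL_ID)

    # 4-bit quantized weights; bitsandbytes places them on the GPU at load time.
    vision_model = LlavaNextForConditionalGeneration.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    )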