Update app.py
app.py CHANGED
@@ -1,15 +1,10 @@
 import spaces
-print('spaces.....................................')
 import os
 import argparse
 from functools import partial
 
 import torch
 import random
-
-# os.system('export CUDA_HOME=$(dirname $(dirname $(which nvcc)))')
-# os.system('pip install -r running_requirements.txt')
-
 import gradio as gr
 from src import MindOmni
 
@@ -38,7 +33,7 @@ def build_model(args):
     return MindOmni_model
 
 
-@spaces.GPU
+@spaces.GPU
 def understand_func(
         MindOmni_model, text, do_sample, temperature,
         max_new_tokens, input_llm_images):
@@ -50,7 +45,7 @@ def understand_func(
     return answer
 
 
-@spaces.GPU
+@spaces.GPU
 def generate_func(
         MindOmni_model, text, use_cot, height, width, guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model, max_input_image_size, randomize_seed, save_images, do_sample, temperature, max_new_tokens, input_llm_images, only_understand):
     if input_llm_images is not None and not isinstance(input_llm_images, list):
@@ -59,14 +54,12 @@ def generate_func(
     if randomize_seed:
         seed = random.randint(0, 10000000)
 
-
-    with open('/tmp/.unhold', 'w') as f:
-        f.write('')
+    print(f'Generate image prompt: {text}')
     output, prompt_ = MindOmni_model.generate_image(
         height, width, guidance_scale, inference_steps, separate_cfg_infer, offload_model, seed, max_input_image_size,
         text, NEGATIVE_PROMPT, input_llm_images, do_sample, temperature, max_new_tokens, only_understand, use_cot=use_cot)
-
-
+    print('Generation finished.')
+
     img = output[0]
 
     if save_images:
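For context, `@spaces.GPU` is the ZeroGPU decorator from the Hugging Face `spaces` package: it requests a GPU only for the duration of the decorated call, which is why the GPU-bound `understand_func` and `generate_func` carry it. Below is a minimal sketch of that pattern, not this Space's actual code; the placeholder model and the Gradio wiring are illustrative assumptions.

# Minimal ZeroGPU sketch (illustrative; not the MindOmni Space's real app.py).
# `spaces` is imported first, before anything that might touch CUDA.
import spaces
import torch
import gradio as gr

# Placeholder model loaded on CPU at startup (a real Space would load its checkpoint here).
model = torch.nn.Linear(4, 4)

@spaces.GPU  # a GPU is attached only while this function runs
def generate(prompt: str) -> str:
    model.to('cuda')
    with torch.no_grad():
        _ = model(torch.zeros(1, 4, device='cuda'))
    return f'Generated output for: {prompt}'

demo = gr.Interface(fn=generate, inputs='text', outputs='text')

if __name__ == '__main__':
    demo.launch()

Outside a ZeroGPU Space the decorator is effectively a no-op, so the same script can run locally on a machine with its own GPU.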