Update app.py
app.py
CHANGED
@@ -10,6 +10,7 @@ import random
 import numpy as np
 import re
 import requests
+import time
 
 # Import necessary functions and classes
 from utils import load_t5, load_clap
@@ -108,6 +109,7 @@ def load_model(model_name, device, model_url=None):
     print(f"Loading {model_size} model: {model_name}")
 
     try:
+        start_time = time.time()
         global_model = build_model(model_size).to(device)
         state_dict = torch.load(model_path, map_location=device, weights_only=True)
         global_model.load_state_dict(state_dict['ema'], strict=False)
@@ -115,7 +117,9 @@ def load_model(model_name, device, model_url=None):
 
         global_model.model_path = model_path
         current_model_name = model_name
-        return f"Successfully loaded model: {model_name}"
+        end_time = time.time()
+        load_time = end_time - start_time
+        return f"Successfully loaded model: {model_name} in {load_time:.2f} seconds"
     except Exception as e:
         global_model = None
         current_model_name = None
@@ -126,6 +130,7 @@ def load_resources(device):
     global global_t5, global_clap, global_vae, global_vocoder, global_diffusion
 
     try:
+        start_time = time.time()
         print("Loading T5 and CLAP models...")
         global_t5 = load_t5(device, max_length=256)
         global_clap = load_clap(device, max_length=256)
@@ -137,13 +142,15 @@ def load_resources(device):
         print("Initializing diffusion...")
         global_diffusion = RF()
 
-        print("Base resources loaded successfully!")
-        return "Resources loaded successfully!"
+        end_time = time.time()
+        load_time = end_time - start_time
+        print(f"Base resources loaded successfully in {load_time:.2f} seconds!")
+        return f"Resources loaded successfully in {load_time:.2f} seconds!"
     except Exception as e:
         print(f"Error loading resources: {str(e)}")
         return f"Failed to load resources. Error: {str(e)}"
 
-def generate_music(prompt, seed, cfg_scale, steps, duration, device, batch_size=
+def generate_music(prompt, seed, cfg_scale, steps, duration, device, batch_size=1, progress=gr.Progress()):
     global global_model, global_t5, global_clap, global_vae, global_vocoder, global_diffusion
 
     if global_model is None:
@@ -181,7 +188,7 @@ def generate_music(prompt, seed, cfg_scale, steps, duration, device, batch_size=
     img, conds = prepare(global_t5, global_clap, init_noise, conds_txt)
     _, unconds = prepare(global_t5, global_clap, init_noise, unconds_txt)
 
-    # Implement batching for
+    # Implement batching for inference
     images = []
     for batch_start in range(0, img.shape[0], batch_size):
         batch_end = min(batch_start + batch_size, img.shape[0])
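The loading changes above bracket each load with time.time() and report the elapsed seconds. For anyone adapting this: time.perf_counter() is the higher-resolution, monotonic clock usually preferred for elapsed-time measurement, and a small context manager keeps the start/end bookkeeping out of each function body. The sketch below is illustrative only: timed and the commented usage are not part of app.py.

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # perf_counter() is monotonic and finer-grained than time.time(),
    # so the measurement cannot jump if the system clock is adjusted.
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f"{label} took {time.perf_counter() - start:.2f} seconds")

# Hypothetical usage mirroring the patched load_model:
# with timed(f"Loading {model_name}"):
#     global_model = build_model(model_size).to(device)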
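The other two changes are the new progress=gr.Progress() parameter and the batched inference loop. In Gradio, a keyword argument defaulting to gr.Progress() gets a live progress tracker injected at call time, and progress.tqdm() wraps an iterable so each step is reported in the web UI, which fits the new per-batch loop naturally. A minimal sketch of the combined pattern follows; run_inference is a hypothetical stand-in for the sampling call the diff cuts off before showing.

import gradio as gr
import torch

def generate_batched(img, batch_size=1, progress=gr.Progress()):
    # Gradio injects a Progress tracker for this default argument;
    # progress.tqdm() reports each loop iteration to the UI.
    images = []
    starts = range(0, img.shape[0], batch_size)
    for batch_start in progress.tqdm(starts, desc="Generating batches"):
        batch_end = min(batch_start + batch_size, img.shape[0])
        batch = img[batch_start:batch_end]
        # run_inference is hypothetical -- the app's real per-batch
        # sampling call is not shown in this diff.
        images.append(run_inference(batch))
    return torch.cat(images, dim=0)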