Update app.py

app.py CHANGED
@@ -18,7 +18,6 @@ import yt_dlp
 import ffmpeg
 import subprocess
 import sys
-import pickle
 import io
 import wave
 from datetime import datetime
@@ -175,79 +174,43 @@ def infer(name, path, index, vc_input, vc_upload, tts_text, tts_voice, f0_up_key
         info = traceback.format_exc()
         print(info)
         return info, (None, None)
+
 def load_model():
     categories = []
-
-    try:
-        with open("/weights/folder_info.json", "r", encoding="utf-8") as f:
-            folder_info = json.load(f)
-    except Exception as e:
-        raise RuntimeError(f"Failed to load folder_info.json: {e}")
-
-    # カテゴリの処理
+    with open("/weights/folder_info.json", "r", encoding="utf-8") as f:
+        folder_info = json.load(f)
     for category_name, category_info in folder_info.items():
-        if not category_info
+        if not category_info['enable']:
             continue
-
-
-        category_folder = category_info.get('folder_path', '')
+        category_title = category_info['title']
+        category_folder = category_info['folder_path']
         models = []
-
         print(f"Creating category {category_title}...")
-
-
-        # model_info.jsonの読み込み
-        try:
-            with open(model_info_path, "r", encoding="utf-8") as f:
-                models_info = json.load(f)
-        except Exception as e:
-            print(f"Failed to load model_info.json for category {category_title}: {e}")
-            continue
-
-        # モデルの処理
+        with open(f"/weights/{category_folder}/model_info.json", "r", encoding="utf-8") as f:
+            models_info = json.load(f)
         for character_name, info in models_info.items():
-            if not info
+            if not info['enable']:
                 continue
-
-
-
-
-
-
-
-            # モデルインデックスの存在確認
-            if info.get('feature_retrieval_library', '') == "None":
+            model_title = info['title']
+            model_name = info['model_path']
+            model_author = info.get("author", None)
+            model_cover = f"/weights/{category_folder}/{character_name}/{info['cover']}"
+            model_index = f"/weights/{category_folder}/{character_name}/{info['feature_retrieval_library']}"
+            if info['feature_retrieval_library'] == "None":
                 model_index = None
-
-
-
-
-            # 作者情報の処理
-            if model_author not in authors and not ("/" in model_author or "&" in model_author):
+            if model_index:
+                assert os.path.exists(model_index), f"Model {model_title} failed to load index."
+            if not (model_author in authors or "/" in model_author or "&" in model_author):
                 authors.append(model_author)
-
-
-
-
-
-
-                continue
-
-            # モデルのロード
-            try:
-                cpt = torch.load(model_path, map_location=torch.device('cpu'))
-                model_version = cpt.get("version", "v1")
-                print(f"Indexed model {model_title} by {model_author} ({model_version})")
-                models.append((character_name, model_title, model_author, model_cover, model_version, model_path, model_index))
-                del cpt
-            except Exception as e:
-                print(f"Error loading model {model_title} from {model_path}: {e}")
-                continue
-
-        # カテゴリにモデルを追加
+            model_path = f"/weights/{category_folder}/{character_name}/{model_name}"
+            cpt = torch.load(f"/weights/{category_folder}/{character_name}/{model_name}", map_location="cpu")
+            model_version = cpt.get("version", "v1")
+            print(f"Indexed model {model_title} by {model_author} ({model_version})")
+            models.append((character_name, model_title, model_author, model_cover, model_version, model_path, model_index))
+            del cpt
         categories.append([category_title, category_folder, models])
-
     return categories
+
 def cut_vocal_and_inst(url, audio_provider, split_model):
     if url != "":
         if not os.path.exists("dl_audio"):
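
load_model() reads /weights/folder_info.json and then one model_info.json per category folder, indexing each enabled model for the UI. The sketch below writes a pair of hypothetical config files containing only the keys the loader actually accesses (enable, title, folder_path, model_path, author, cover, feature_retrieval_library); every name and path in it is a placeholder, and the real Space may expect additional fields. It writes under a relative weights/ directory for illustration, whereas the app reads from the absolute /weights path.

# Hypothetical example of the two config files load_model() reads.
# Keys mirror what the loader accesses; all names and paths are placeholders.
import json
import os

folder_info = {
    "anime": {                       # category_name (dict key)
        "enable": True,              # categories with enable=False are skipped
        "title": "Anime",            # used as category_title
        "folder_path": "anime",      # subfolder under /weights/
    }
}

model_info = {
    "miku": {                        # character_name, also the model's subfolder
        "enable": True,
        "title": "Miku",
        "model_path": "miku.pth",    # checkpoint passed to torch.load()
        "author": "example-author",
        "cover": "cover.png",        # image under /weights/<folder>/<character>/
        "feature_retrieval_library": "added_index.index",  # "None" disables the index
    }
}

# Written relative to the current directory here; the Space reads from /weights.
os.makedirs("weights/anime", exist_ok=True)
with open("weights/folder_info.json", "w", encoding="utf-8") as f:
    json.dump(folder_info, f, ensure_ascii=False, indent=2)
with open("weights/anime/model_info.json", "w", encoding="utf-8") as f:
    json.dump(model_info, f, ensure_ascii=False, indent=2)
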
@@ -474,7 +437,8 @@ if __name__ == '__main__':
             "#### <center>Model creators:\n"
             f"<center>{authStr}\n"
         )
+
     if limitation is True:
-        app.queue(max_size=20, api_open=config.api).launch(
+        app.queue(max_size=20, api_open=config.api).launch(allowed_paths=["/"], )
     else:
-        app.queue(max_size=20, api_open=config.api).launch(
+        app.queue(max_size=20, api_open=config.api).launch(allowed_paths=["/"], share=False)
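
Gradio will not serve arbitrary files from disk: launch()'s allowed_paths argument whitelists extra directories beyond the app's own directory and its temporary cache, which is what lets assets such as the cover images stored under /weights be displayed. Passing "/" whitelists the entire filesystem, so a narrower directory is usually the safer choice. A minimal sketch, assuming a Blocks app bound to the name app (the demo content and the /weights path are placeholders):

# Minimal sketch: expose only the weights folder instead of the whole filesystem.
import gradio as gr

with gr.Blocks() as app:
    gr.Markdown("demo")

app.queue(max_size=20).launch(
    allowed_paths=["/weights"],  # extra directories Gradio may serve files from
    share=False,
)
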