Update web-demos/hugging_face/app.py
web-demos/hugging_face/app.py  +48 -24  CHANGED
@@ -297,40 +297,64 @@ def vos_tracking_video(video_state, interactive_state, mask_dropdown):
      return video_output, video_state, interactive_state, operation_log, operation_log

  # inpaint
- def inpaint_video(video_state, …
-     operation_log = [("",""), ("Inpainting …
+ def inpaint_video(video_state, *_args):
+     operation_log = [("",""), ("Inpainting started in safe high-quality mode.","Normal")]

+     # Get the frames and masks
      frames = np.asarray(video_state["origin_images"])
      fps = video_state["fps"]
      inpaint_masks = np.asarray(video_state["masks"])
+
+     # Mask selection (fall back to the default if none were chosen manually)
+     mask_dropdown = _args[-1]
      if len(mask_dropdown) == 0:
          mask_dropdown = ["mask_001"]
      mask_dropdown.sort()
-     … (20 removed lines not rendered in the diff view)
+     inpaint_mask_numbers = [int(name.split("_")[1]) for name in mask_dropdown]
+     for i in range(1, np.max(inpaint_masks) + 1):
+         if i not in inpaint_mask_numbers:
+             inpaint_masks[inpaint_masks == i] = 0
+
+     # Fixed safe settings
+     chunk_size = 20              # process 20 frames at a time
+     fixed_resize_ratio = 1.0     # do not downscale the resolution
+     fixed_dilate_radius = 4
+     fixed_raft_iter = 20
+     fixed_neighbor_length = 5
+     fixed_ref_stride = 10
+
+     total_len = len(frames)
+     inpainted_all = []
+
+     for start in range(0, total_len, chunk_size):
+         end = min(start + chunk_size, total_len)
+         print(f"Inpainting chunk {start} → {end}")
+         chunk_frames = frames[start:end]
+         chunk_masks = inpaint_masks[start:end]
+
+         # Run inpainting inference on this chunk
+         chunk_result = model.baseinpainter.inpaint(
+             chunk_frames,
+             chunk_masks,
+             ratio=fixed_resize_ratio,
+             dilate_radius=fixed_dilate_radius,
+             raft_iter=fixed_raft_iter,
+             subvideo_length=chunk_size,
+             neighbor_length=fixed_neighbor_length,
+             ref_stride=fixed_ref_stride
+         )
+         inpainted_all.extend(chunk_result)
+
+     # Save with an increased bitrate
      output_path = "./result/inpaint/{}".format(video_state["video_name"])
-     video_output = generate_video_from_frames( …
+     video_output = generate_video_from_frames(
+         inpainted_all,
+         output_path=output_path,
+         fps=fps,
+         bitrate="30M"
+     )

      return video_output, operation_log, operation_log
-     print("Inpainting resolution:", inpainted_frames[0].shape)


  # generate video after vos inference
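
The new call hands bitrate="30M" to generate_video_from_frames, whose definition sits outside this hunk. Below is a minimal sketch of a helper with a compatible signature, assuming imageio with its ffmpeg plugin (the bitrate string is forwarded to ffmpeg, so values like "30M" are accepted); the real helper in app.py may be implemented differently.

import os
import imageio
import numpy as np

def generate_video_from_frames(frames, output_path, fps=30, bitrate="30M"):
    # Hypothetical stand-in for the helper defined elsewhere in app.py.
    os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
    writer = imageio.get_writer(output_path, fps=fps, bitrate=bitrate)
    for frame in frames:
        writer.append_data(np.asarray(frame, dtype=np.uint8))  # expects HxWx3 uint8 frames
    writer.close()
    return output_path

Raising the bitrate mainly matters because fixed_resize_ratio = 1.0 keeps the full resolution, so the encoder needs more bits to avoid visible compression artifacts.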
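The switch from named parameters to inpaint_video(video_state, *_args) lets an existing Gradio click handler keep passing the same inputs while the function only reads the last one: _args[-1] is taken to be the mask dropdown. A hypothetical wiring sketch follows; the component names and layout are assumptions, not taken from app.py.

import gradio as gr

with gr.Blocks() as demo:
    video_state = gr.State({})
    interactive_state = gr.State({})
    mask_dropdown = gr.Dropdown(choices=["mask_001"], value=[], multiselect=True, label="Mask selection")
    video_output = gr.Video(label="Inpainted video")
    run_status = gr.HighlightedText(label="Status")
    run_status2 = gr.HighlightedText(label="Log")
    inpaint_button = gr.Button("Inpaint")

    # inpaint_video is the function from the hunk above; the dropdown must stay
    # the last entry in `inputs`, because the function reads it via _args[-1].
    inpaint_button.click(
        fn=inpaint_video,
        inputs=[video_state, interactive_state, mask_dropdown],
        outputs=[video_output, run_status, run_status2],
    )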