import argparse
import json
import os
import time

import imageio
import moviepy.editor as mpy
import torch

from motion.visual_api import Visualize


def interface(prompt, mode="cadm", render_mode="pyrender_slow", out_size=1024, tada_role=None):
    os.makedirs("results/motion", exist_ok=True)
    os.makedirs("results/joints", exist_ok=True)
    os.makedirs("results/smpls", exist_ok=True)
    # Turn the prompt into a filesystem-friendly name, then keep at most 25 underscore-separated tokens.
    name = prompt.replace("/", "_").replace(" ", "_").replace(",", "_").replace("#", "_").replace("|", "_").replace(".npy", "").replace(".txt", "").replace(".csv", "").replace(".", "").replace("'", "_")
    name = "_".join(name.split("_")[:25])
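    # Illustrative example of the sanitization above, using the default CLI prompt:
    #   "120, A person walks forward and does a handstand."
    #   -> "120__A_person_walks_forward_and_does_a_handstand"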
    out_path = os.path.join("results/motion", name + ".mp4")
    gif_path = os.path.join("results/motion", name + ".gif")
    joint_path = os.path.join("results/joints", name + ".npy")
    smpl_path = os.path.join("results/smpls", name + ".npy")
    '''
    prompt is given as "length, prompt"; if only the prompt is supplied, length defaults to 196.
    mode selects which model variant to use.
    '''
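    # Illustrative prompt formats (the leading number is presumably the motion length in frames;
    # 196 is used when it is omitted):
    #   "120, A person walks forward and does a handstand."
    #   "A person walks forward and does a handstand."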
    assert mode in ["cadm", "cadm-augment", "mdm"]
    assert render_mode in ["joints", "pyrender_fast", "pyrender_slow"]
    path = None
    with open("motion/path.json", "r") as f:
        json_dict = json.load(f)
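    # Sketch of the expected motion/path.json (assumption: only "smpl_path" and "tada_base" are read
    # directly below; the full dict is also passed through as kargs["path"] for Visualize to consume):
    # {
    #     "smpl_path": "path/to/smpl/models",
    #     "tada_base": "path/to/tada/assets",
    #     ...
    # }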
    t1 = time.time()
    kargs = {
        "mode": mode,
        "device": "cuda" if torch.cuda.is_available() else "cpu",
        "rotate": 0,
        "condition": "text",
        "smpl_path": json_dict["smpl_path"],
        "skip_steps": 0,
        "path": json_dict,
        "tada_base": json_dict["tada_base"],
        "tada_role": tada_role,
    }
    visual = Visualize(**kargs)
    t2 = time.time()
    output = visual.predict(prompt, path, render_mode, joint_path, smpl_path)
    t3 = time.time()
    if render_mode == "joints":
        pics = visual.joints_process(output, prompt, out_size, out_size)
    elif render_mode.startswith("pyrender"):
        meshes, _ = visual.get_mesh(output)
        pics = visual.pyrender_process(meshes, out_size, out_size)
    vid = mpy.ImageSequenceClip(list(pics), fps=20)
    vid.write_videofile(out_path, remove_temp=True)
    imageio.mimsave(gif_path, pics, duration=1000 / 20, loop=0)
    t4 = time.time()
    cost_init = t2 - t1
    cost_infer = t3 - t2
    cost_render = t4 - t3
    print("model init: %.4fs, inference + fitting: %.4fs, rendering: %.4fs, total: %.4fs" % (cost_init, cost_infer, cost_render, t4 - t1))
    return out_path


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='visualize demo')
    ############################ basic settings ########################
    parser.add_argument('--prompt', type=str, default="120, A person walks forward and does a handstand.")
    parser.add_argument('--mode', type=str, default="cadm", choices=['cadm', 'cadm-augment', "mdm"], help="choose model")
    parser.add_argument("--render_mode", default="pyrender_slow", type=str, choices=["pyrender_slow", "pyrender_fast", "joints"])
    parser.add_argument("--size", default=1024, type=int)
    parser.add_argument("--tada_role", default=None, type=str)
    opt = parser.parse_args()
    out_path = interface(opt.prompt, mode=opt.mode, render_mode=opt.render_mode, out_size=opt.size, tada_role=opt.tada_role)
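
# Example invocation (illustrative; the filename demo.py is an assumption, and motion/path.json
# must point to valid model / SMPL / TADA assets):
#   python demo.py --prompt "96, A person jumps in place." --mode cadm --render_mode pyrender_fast --size 512
# Outputs are written to results/motion/*.mp4, results/motion/*.gif, results/joints/*.npy and results/smpls/*.npy.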