Coming Soon
- app.py +3 -3
- app_pose.py +1 -53
app.py
CHANGED
@@ -4,7 +4,7 @@ import torch
 from model import Model, ModelType
 
 # from app_canny import create_demo as create_demo_canny
-
+from app_pose import create_demo as create_demo_pose
 from app_text_to_video import create_demo as create_demo_text_to_video
 from app_pix2pix_video import create_demo as create_demo_pix2pix_video
 # from app_canny_db import create_demo as create_demo_canny_db
@@ -44,8 +44,8 @@ with gr.Blocks(css='style.css') as demo:
         # pass
         create_demo_pix2pix_video(model)
     with gr.Tab('Pose Conditional'):
-        pass
-
+        # pass
+        create_demo_pose(model)
     with gr.Tab('Edge Conditional'):
         pass
         # create_demo_canny(model)
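For orientation, a minimal sketch of how the affected parts of app.py read after this commit. Only the lines shown in the hunks above come from the diff; the Model() construction, the other tab names, and the launch call are assumptions and are marked as such in the comments.

import gradio as gr

from model import Model, ModelType
# from app_canny import create_demo as create_demo_canny
from app_pose import create_demo as create_demo_pose
from app_text_to_video import create_demo as create_demo_text_to_video
from app_pix2pix_video import create_demo as create_demo_pix2pix_video

model = Model()  # hypothetical constructor call; the real arguments lie outside this diff

with gr.Blocks(css='style.css') as demo:
    # ... other tabs (text-to-video, pix2pix, ...) are unchanged by this commit ...
    with gr.Tab('Pose Conditional'):
        # pass
        create_demo_pose(model)   # newly enabled pose tab
    with gr.Tab('Edge Conditional'):
        pass                      # edge-conditional demo still disabled
        # create_demo_canny(model)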
app_pose.py
CHANGED
@@ -3,61 +3,9 @@ import os
 
 from model import Model
 
-examples = [
-    ['Motion 1', "A Robot is dancing in Sahara desert"],
-    ['Motion 2', "A Robot is dancing in Sahara desert"],
-    ['Motion 3', "A Robot is dancing in Sahara desert"],
-    ['Motion 4', "A Robot is dancing in Sahara desert"],
-    ['Motion 5', "A Robot is dancing in Sahara desert"],
-]
-
 def create_demo(model: Model):
     with gr.Blocks() as demo:
         with gr.Row():
-            gr.Markdown('## Text and Pose Conditional Video Generation')
-
-        with gr.Row():
-            gr.Markdown('### You must select one pose sequence shown below, or use the examples')
-            with gr.Column():
-                gallery_pose_sequence = gr.Gallery(label="Pose Sequence", value=[('__assets__/poses_skeleton_gifs/dance1.gif', "Motion 1"), ('__assets__/poses_skeleton_gifs/dance2.gif', "Motion 2"), ('__assets__/poses_skeleton_gifs/dance3.gif', "Motion 3"), ('__assets__/poses_skeleton_gifs/dance4.gif', "Motion 4"), ('__assets__/poses_skeleton_gifs/dance5.gif', "Motion 5")]).style(grid=[2], height="auto")
-                input_video_path = gr.Textbox(label="Pose Sequence", visible=False, value="Motion 1")
-                gr.Markdown("## Selection")
-                pose_sequence_selector = gr.Markdown('Pose Sequence: **Motion 1**')
-            with gr.Column():
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-            with gr.Column():
-                # result = gr.Video(label="Generated Video")
-                result = gr.Image(label="Generated Video")
-
-        input_video_path.change(on_video_path_update, None, pose_sequence_selector)
-        gallery_pose_sequence.select(pose_gallery_callback, None, input_video_path)
-        inputs = [
-            input_video_path,
-            # pose_sequence,
-            prompt,
-        ]
-
-        gr.Examples(examples=examples,
-                    inputs=inputs,
-                    outputs=result,
-                    # cache_examples=os.getenv('SYSTEM') == 'spaces',
-                    fn=model.process_controlnet_pose,
-                    run_on_click=False,
-                    )
-        # fn=process,
-        # )
-
-
-        run_button.click(fn=model.process_controlnet_pose,
-                         inputs=inputs,
-                         outputs=result,)
+            gr.Markdown('## Text and Pose Conditional Video Generation (coming soon)')
 
     return demo
-
-
-def on_video_path_update(evt: gr.EventData):
-    return f'Pose Sequence: **{evt._data}**'
-
-def pose_gallery_callback(evt: gr.SelectData):
-    return f"Motion {evt.index+1}"
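After this commit, app_pose.py is reduced to a placeholder tab. A sketch of the full resulting file, assuming the untouched header lines (import gradio as gr, import os) implied by the hunk context:

import gradio as gr
import os  # still imported per the hunk context, though no longer used here

from model import Model


def create_demo(model: Model):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Text and Pose Conditional Video Generation (coming soon)')

    return demo

The examples list, the pose gallery, and the model.process_controlnet_pose wiring are deleted rather than commented out, so re-enabling the pose demo later means restoring them from the commit history.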