anzorq committed on
Commit
8ef6a7b
·
1 Parent(s): c5921a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -3
app.py CHANGED
@@ -6,6 +6,8 @@ import numpy as np
6
  from scipy.io.wavfile import read
7
  import gradio as gr
8
 
 
 
9
 
10
  os.system('git clone https://github.com/hmartiro/riffusion-inference.git riffusion')
11
 
@@ -227,7 +229,7 @@ def on_submit(prompt_1, prompt_2, steps, num_iterations, feel, seed):
227
  if prompt_1 == "":
228
  return None, gr.update(value="First prompt is required.")
229
  if prompt_2 == "":
230
- return generate(prompt_1, steps, num_iterations, feel, seed), None
231
  else:
232
  return generate_riffuse(prompt_1, steps, num_iterations, feel, prompt_end=prompt_2, seed_start=seed), None
233
 
@@ -242,7 +244,27 @@ def on_num_iterations_change(n, prompt_2):
242
  total_length = 2.5 + 2.5 * n
243
  return gr.update(value=f"Total length: {total_length:.2f} seconds")
244
 
245
- with gr.Blocks() as app:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
  gr.Markdown("## Riffusion Demo")
247
  gr.Markdown("""Generate audio using the [Riffusion](https://huggingface.co/riffusion/riffusion-model-v1) model.<br>
248
  In single prompt mode you can generate up to ~1 minute of audio with smooth transitions between sections. (beta)<br>
@@ -267,14 +289,21 @@ with gr.Blocks() as app:
267
  with gr.Column():
268
  video = gr.Video()
269
 
 
 
 
 
 
270
  inputs = [prompt_1, prompt_2, steps, num_iterations, feel, seed]
271
- outputs = [video, info]
272
 
273
  num_iterations.change(on_num_iterations_change, [num_iterations, prompt_2], [info])
274
  prompt_1.submit(on_submit, inputs, outputs)
275
  prompt_2.submit(on_submit, inputs, outputs)
276
  btn_generate.click(on_submit, inputs, outputs)
277
 
 
 
278
  examples = gr.Examples(
279
  examples=[
280
  ["typing", "dance beat", "og_beat", 10],
 
6
  from scipy.io.wavfile import read
7
  import gradio as gr
8
 
9
+ from share_btn import community_icon_html, loading_icon_html, share_js
10
+
11
 
12
  os.system('git clone https://github.com/hmartiro/riffusion-inference.git riffusion')
13
 
 
229
  if prompt_1 == "":
230
  return None, gr.update(value="First prompt is required.")
231
  if prompt_2 == "":
232
+ return generate(prompt_1, steps, num_iterations, feel, seed), None, gr.update(visible=True)
233
  else:
234
  return generate_riffuse(prompt_1, steps, num_iterations, feel, prompt_end=prompt_2, seed_start=seed), None
235
 
 
244
  total_length = 2.5 + 2.5 * n
245
  return gr.update(value=f"Total length: {total_length:.2f} seconds")
246
 
247
+
248
+ css = '''
249
+ #share-btn-container {
250
+ display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
251
+ }
252
+ #share-btn {
253
+ all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
254
+ }
255
+ #share-btn * {
256
+ all: unset;
257
+ }
258
+ #share-btn-container div:nth-child(-n+2){
259
+ width: auto !important;
260
+ min-height: 0px !important;
261
+ }
262
+ #share-btn-container .wrap {
263
+ display: none !important;
264
+ }
265
+ '''
266
+
267
+ with gr.Blocks(css=css) as app:
268
  gr.Markdown("## Riffusion Demo")
269
  gr.Markdown("""Generate audio using the [Riffusion](https://huggingface.co/riffusion/riffusion-model-v1) model.<br>
270
  In single prompt mode you can generate up to ~1 minute of audio with smooth transitions between sections. (beta)<br>
 
289
  with gr.Column():
290
  video = gr.Video()
291
 
292
+ with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
293
+ community_icon = gr.HTML(community_icon_html, visible=False)
294
+ loading_icon = gr.HTML(loading_icon_html, visible=False)
295
+ share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
296
+
297
  inputs = [prompt_1, prompt_2, steps, num_iterations, feel, seed]
298
+ outputs = [video, info, share_group]
299
 
300
  num_iterations.change(on_num_iterations_change, [num_iterations, prompt_2], [info])
301
  prompt_1.submit(on_submit, inputs, outputs)
302
  prompt_2.submit(on_submit, inputs, outputs)
303
  btn_generate.click(on_submit, inputs, outputs)
304
 
305
+ share_button.click(None, [], [], _js=share_js)
306
+
307
  examples = gr.Examples(
308
  examples=[
309
  ["typing", "dance beat", "og_beat", 10],