added credits
app.py
CHANGED
@@ -37,6 +37,9 @@ Community examples consist of both inference and training examples that have been
     <a href='https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image' target='_blank'> Click here for more information about community pipelines </a>
 </p>
 """
+article = """
+<p style='text-align: center;'>Community pipeline by Mikail Duzenli • Gradio demo by Sylvain Filoni & Ahsen Khaliq<p>
+"""
 audio_input = gr.Audio(source="microphone", type="filepath")
 image_output = gr.Image()
 
@@ -53,5 +56,5 @@ def speech_to_text(audio_sample):
 
     return output.images[0]
 
-demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output, title=title, description=description)
+demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output, title=title, description=description, article=article)
 demo.launch()
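
For context, here is a minimal sketch of how app.py ends up wiring the new article string into the Gradio interface after this commit. The title, description, and the body of speech_to_text are placeholders (the real ones live earlier in app.py and are not part of this diff); in the actual Space, speech_to_text transcribes the recording and runs the diffusers speech-to-image community pipeline.

import gradio as gr

# Placeholders: the real title/description strings are defined earlier in app.py.
title = "Speech to Image"
description = "Generate an image from a spoken prompt."

# Credit line added by this commit; gr.Interface renders `article` below the demo.
article = """
<p style='text-align: center;'>Community pipeline by Mikail Duzenli • Gradio demo by Sylvain Filoni & Ahsen Khaliq<p>
"""

def speech_to_text(audio_sample):
    # Stub: the real function transcribes the audio and runs the
    # speech-to-image pipeline, returning output.images[0].
    return None

# `source="microphone"` matches the Gradio version this Space uses;
# newer Gradio releases spell it `sources=["microphone"]`.
audio_input = gr.Audio(source="microphone", type="filepath")
image_output = gr.Image()

demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output,
                    title=title, description=description, article=article)
demo.launch()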