import os

# Pin the gradio version this demo was written against before importing it.
os.system("pip install gradio==2.4.6")

import gradio as gr

title = "FastSpeech2"
description = "Gradio Demo for fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit. To use it, simply enter your text, or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.06912' target='_blank'>fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis' target='_blank'>Github Repo</a></p>"
# Each example is an [input text, model name] pair matching the inputs below.
examples = [
    ["Hello this is a test run", "fastspeech2-en-200_speaker-cv4"]
]
# Load one hosted interface per fairseq S^2 model from the Hugging Face Hub.
io1 = gr.Interface.load("huggingface/facebook/fastspeech2-en-200_speaker-cv4")
io2 = gr.Interface.load("huggingface/facebook/tts_transformer-en-200_speaker-cv4")
io3 = gr.Interface.load("huggingface/facebook/tts_transformer-zh-cv7_css10")
io4 = gr.Interface.load("huggingface/facebook/tts_transformer-fr-cv7_css10")
io5 = gr.Interface.load("huggingface/facebook/tts_transformer-ru-cv7_css10")
io6 = gr.Interface.load("huggingface/facebook/tts_transformer-tr-cv7")
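
# NOTE (assumption, kept as a comment so the script runs unchanged): interfaces
# built with gr.Interface.load("huggingface/<repo>") proxy the Hugging Face
# Inference API, so each io* object can be called like a function; for these TTS
# models the call is expected to return a path to the synthesized audio clip, e.g.:
#
#   # audio_path = io1("Hello this is a test run")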
def inference(text, model):
    # Dispatch to the loaded interface that matches the selected model name.
    if model == "fastspeech2-en-200_speaker-cv4":
        output = io1(text)
    elif model == "tts_transformer-en-200_speaker-cv4":
        output = io2(text)
    elif model == "tts_transformer-zh-cv7_css10":
        output = io3(text)
    elif model == "tts_transformer-fr-cv7_css10":
        output = io4(text)
    elif model == "tts_transformer-ru-cv7_css10":
        output = io5(text)
    else:  # tts_transformer-tr-cv7
        output = io6(text)
    return output
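
# Quick local sanity check (hypothetical, left commented out): requires network
# access to the Hugging Face Inference API and assumes each interface returns an
# audio filepath as noted above.
#
#   # print(inference("Hello this is a test run", "tts_transformer-en-200_speaker-cv4"))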
gr.Interface(
    inference,
    [
        gr.inputs.Textbox(label="Input", lines=5),
        gr.inputs.Dropdown(
            choices=[
                "fastspeech2-en-200_speaker-cv4",
                "tts_transformer-en-200_speaker-cv4",
                "tts_transformer-zh-cv7_css10",
                "tts_transformer-fr-cv7_css10",
                "tts_transformer-ru-cv7_css10",
                "tts_transformer-tr-cv7",
            ],
            type="value",
            default="fastspeech2-en-200_speaker-cv4",
            label="model",
        ),
    ],
    gr.outputs.Audio(label="Output"),
    examples=examples,
    article=article,
    title=title,
    description=description,
).launch(enable_queue=True, cache_examples=True)