aliabd (HF Staff) committed
Commit f9fcde5 · verified · 1 Parent(s): 3307e95

Upload folder using huggingface_hub

Files changed (2)
  1. run.ipynb +1 -1
  2. run.py +2 -2
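The commit message indicates the folder was pushed with huggingface_hub. As a rough sketch of how such an upload is typically done (not part of this commit; the repo id and local folder below are placeholders), HfApi.upload_folder can push a demo folder to a Space:

# Hypothetical example: push a local demo folder to a Space with huggingface_hub.
# repo_id and folder_path are placeholders, not values taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN env variable
api.upload_folder(
    folder_path="automatic-speech-recognition",     # local folder to upload (placeholder)
    repo_id="gradio/automatic-speech-recognition",  # target Space (placeholder)
    repo_type="space",                              # upload to a Space rather than a model repo
    commit_message="Upload folder using huggingface_hub",
)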
run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: automatic-speech-recognition\n", "### Automatic speech recognition English. Record from your microphone and the app will transcribe the audio.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "hf_token = os.getenv(\"hf_token\")\n", "\n", "# automatically load the interface from a HF model \n", "# you can remove the hf_token parameter if you don't care about rate limiting. \n", "demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=\"Speech-to-text\",\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", " hf_token=hf_token\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: automatic-speech-recognition\n", "### Automatic speech recognition English. Record from your microphone and the app will transcribe the audio.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "hf_token = os.getenv(\"hf_token\")\n", "\n", "# automatically load the interface from a HF model\n", "# you can remove the hf_token parameter if you don't care about rate limiting.\n", "demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=\"Speech-to-text\",\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", " hf_token=hf_token\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -4,8 +4,8 @@ import os
  # save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting
  hf_token = os.getenv("hf_token")
 
- # automatically load the interface from a HF model 
- # you can remove the hf_token parameter if you don't care about rate limiting. 
+ # automatically load the interface from a HF model
+ # you can remove the hf_token parameter if you don't care about rate limiting.
  demo = gr.load(
      "huggingface/facebook/wav2vec2-base-960h",
      title="Speech-to-text",
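For reference, run.py after this commit, reconstructed from the notebook cell above (only the trailing whitespace on the two comment lines changed):

import gradio as gr
import os

# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting
hf_token = os.getenv("hf_token")

# automatically load the interface from a HF model
# you can remove the hf_token parameter if you don't care about rate limiting.
demo = gr.load(
    "huggingface/facebook/wav2vec2-base-960h",
    title="Speech-to-text",
    inputs="mic",
    description="Let me try to guess what you're saying!",
    hf_token=hf_token
)

demo.launch()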