ajayarora1235 committed on
Commit
35a24d7
·
1 Parent(s): aa9ee54

fix extra dependencies in app

Browse files
Files changed (1) hide show
  1. app.py +2 -62
app.py CHANGED
@@ -31,10 +31,6 @@ os.environ["TEMP"] = tmp
31
  warnings.filterwarnings("ignore")
32
  torch.manual_seed(114514)
33
 
34
- from ilariatts import tts_order_voice
35
- language_dict = tts_order_voice
36
- ilariavoices = language_dict.keys()
37
-
38
  import signal
39
 
40
  import math
@@ -1407,9 +1403,6 @@ def mouth(size, face, voice, faces):
1407
  process = subprocess.Popen(command, shell=True, cwd='/content/wav2lip-HD/Wav2Lip-master')
1408
  stdout, stderr = process.communicate()
1409
  return '/content/wav2lip-HD/outputs/result.mp4', 'Animation completed.'
1410
- eleven_voices = ['Adam','Antoni','Josh','Arnold','Sam','Bella','Rachel','Domi','Elli']
1411
- eleven_voices_ids=['pNInz6obpgDQGcFmaJgB','ErXwobaYiN019PkySvjV','TxGEqnHWrfWFTfGW9XjX','VR6AewLTigWG4xSOukaG','yoZ06aMxZJJ28mfd3POQ','EXAVITQu4vr4xnSDxMaL','21m00Tcm4TlvDq8ikWAM','AZnzlk1XvdvUeBnXmlld','MF3mGyEYCl7XYWbV9V6O']
1412
- chosen_voice = dict(zip(eleven_voices, eleven_voices_ids))
1413
 
1414
  def stoptraining(mim):
1415
  if int(mim) == 1:
@@ -1424,53 +1417,7 @@ def stoptraining(mim):
1424
  )
1425
 
1426
 
1427
- # def elevenTTS(xiapi, text, id, lang):
1428
- # if xiapi!= '' and id !='':
1429
- # choice = chosen_voice[id]
1430
- # CHUNK_SIZE = 1024
1431
- # url = f"https://api.elevenlabs.io/v1/text-to-speech/{choice}"
1432
- # headers = {
1433
- # "Accept": "audio/mpeg",
1434
- # "Content-Type": "application/json",
1435
- # "xi-api-key": xiapi
1436
- # }
1437
- # if lang == 'en':
1438
- # data = {
1439
- # "text": text,
1440
- # "model_id": "eleven_monolingual_v1",
1441
- # "voice_settings": {
1442
- # "stability": 0.5,
1443
- # "similarity_boost": 0.5
1444
- # }
1445
- # }
1446
- # else:
1447
- # data = {
1448
- # "text": text,
1449
- # "model_id": "eleven_multilingual_v1",
1450
- # "voice_settings": {
1451
- # "stability": 0.5,
1452
- # "similarity_boost": 0.5
1453
- # }
1454
- # }
1455
-
1456
- # response = requests.post(url, json=data, headers=headers)
1457
- # with open('./temp_eleven.mp3', 'wb') as f:
1458
- # for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
1459
- # if chunk:
1460
- # f.write(chunk)
1461
- # aud_path = save_to_wav('./temp_eleven.mp3')
1462
- # return aud_path, aud_path
1463
- # else:
1464
- # tts = gTTS(text, lang=lang)
1465
- # tts.save('./temp_gTTS.mp3')
1466
- # aud_path = save_to_wav('./temp_gTTS.mp3')
1467
- # return aud_path, aud_path
1468
-
1469
- # def ilariaTTS(text, ttsvoice):
1470
- # vo=language_dict[ttsvoice]
1471
- # asyncio.run(edge_tts.Communicate(text, vo).save("./temp_ilaria.mp3"))
1472
- # aud_path = save_to_wav('./temp_ilaria.mp3')
1473
- # return aud_path, aud_path
1474
 
1475
  def transcribe_btn_click(audio_choice):
1476
  batch_size = 1 # Adjust based on your GPU memory availability
@@ -1619,7 +1566,7 @@ for root, dirs, files in os.walk(index_root, topdown=False):
1619
  if name.endswith(".index") and "trained" not in name:
1620
  index_paths.append("%s/%s" % (root, name))
1621
 
1622
- with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="Ilaria RVC 💖") as app:
1623
  with gr.Tabs():
1624
  with gr.TabItem("Inference"):
1625
  # gr.HTML("<h1> Ilaria RVC 💖 </h1>")
@@ -1744,13 +1691,6 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
1744
  interactive=False,
1745
  )
1746
 
1747
- # with gr.Accordion('IlariaTTS', open=True):
1748
- # with gr.Column():
1749
- # ilariaid=gr.Dropdown(label="Voice:", choices=ilariavoices, value="English-Jenny (Female)")
1750
- # ilariatext = gr.Textbox(label="Input your Text", interactive=True, value="This is a test.")
1751
- # ilariatts_button = gr.Button(value="Speak")
1752
- # ilariatts_button.click(fn=ilariaTTS, inputs=[ilariatext, ilariaid], outputs=[record_button, input_audio0])
1753
-
1754
  #with gr.Column():
1755
  with gr.Accordion("Index Settings", open=False):
1756
  #with gr.Row():
 
31
  warnings.filterwarnings("ignore")
32
  torch.manual_seed(114514)
33
 
 
 
 
 
34
  import signal
35
 
36
  import math
 
1403
  process = subprocess.Popen(command, shell=True, cwd='/content/wav2lip-HD/Wav2Lip-master')
1404
  stdout, stderr = process.communicate()
1405
  return '/content/wav2lip-HD/outputs/result.mp4', 'Animation completed.'
 
 
 
1406
 
1407
  def stoptraining(mim):
1408
  if int(mim) == 1:
 
1417
  )
1418
 
1419
 
1420
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1421
 
1422
  def transcribe_btn_click(audio_choice):
1423
  batch_size = 1 # Adjust based on your GPU memory availability
 
1566
  if name.endswith(".index") and "trained" not in name:
1567
  index_paths.append("%s/%s" % (root, name))
1568
 
1569
+ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="HITGEN AI") as app:
1570
  with gr.Tabs():
1571
  with gr.TabItem("Inference"):
1572
  # gr.HTML("<h1> Ilaria RVC 💖 </h1>")
 
1691
  interactive=False,
1692
  )
1693
 
 
 
 
 
 
 
 
1694
  #with gr.Column():
1695
  with gr.Accordion("Index Settings", open=False):
1696
  #with gr.Row():