KaiChen1998 committed
Commit 0c388a7 · 1 Parent(s): 7218693

update html

Files changed (2):
  1. app.py +3 -4
  2. requirements.txt +1 -1
app.py CHANGED
```diff
@@ -358,9 +358,8 @@ title_markdown = ("""
     <div>
       <h1 style="margin: 0;">EMOVA: Empowering Language Models to See, Hear and Speak with Vivid Emotion</h2>
       <p style="margin: 10px 0;">
-        1. Note that to use the Webcam and Microphone, open <a href="chrome://flags/#unsafely-treat-insecure-origin-as-secure">chrome://flags/#unsafely-treat-insecure-origin-as-secure</a> and put this link into the box.<br/>
-        2. To chat with EMOVA, upload images, enter texts or record audios and then do not forget to <mark>Click 💬 Chat Button</mark> ^v^!<br/>
-        3. Heighten the <code>Max output tokens</code> if necessary to talk longer with EMOVA.
+        1. To chat with EMOVA, upload images, enter texts or record audios and then do not forget to <mark>Click 💬 Chat Button</mark> ^v^!<br/>
+        2. Heighten the <code>Max output tokens</code> if necessary to talk longer with EMOVA.
       </p>
     </div>
   </div>
@@ -404,7 +403,7 @@ def build_demo(embed_mode, cur_dir=None):
     with gr.Blocks(title="EMOVA", theme=gr.themes.Default(), css=block_css) as demo:
         state = gr.State()
         if not embed_mode:
-            gr.Markdown(title_markdown)
+            gr.HTML(title_markdown)

         ##############
         # Chatbot
```
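The switch from `gr.Markdown` to `gr.HTML` matters because `title_markdown` is a raw HTML fragment: `gr.Markdown` runs the string through a Markdown renderer, which (depending on the Gradio version) may escape or strip inline tags such as `<mark>` and the styled heading, while `gr.HTML` injects the fragment verbatim. A minimal, self-contained sketch of the pattern, with the title string abbreviated from the diff above:

```python
import gradio as gr

# Abbreviated from the title_markdown string in app.py: a raw HTML fragment.
title_markdown = """
<div>
  <h1 style="margin: 0;">EMOVA: Empowering Language Models to See, Hear and Speak with Vivid Emotion</h1>
  <p style="margin: 10px 0;">
    1. To chat with EMOVA, upload images, enter texts or record audios and then do not forget to <mark>Click 💬 Chat Button</mark> ^v^!<br/>
    2. Heighten the <code>Max output tokens</code> if necessary to talk longer with EMOVA.
  </p>
</div>
"""

with gr.Blocks(title="EMOVA") as demo:
    # gr.HTML renders the fragment as-is; gr.Markdown would treat it as
    # Markdown source, which may not preserve the inline HTML styling.
    gr.HTML(title_markdown)

if __name__ == "__main__":
    demo.launch()
```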
requirements.txt CHANGED
```diff
@@ -10,7 +10,7 @@ timm==0.6.13
 # flash_attn
 https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
 scipy
-gradio==4.44.0
+gradio

 monotonic_align
 librosa==0.8.0
```
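Dropping the `==4.44.0` pin lets pip resolve whatever Gradio release is current at build time (on Hugging Face Spaces the Gradio version can also be governed by the `sdk_version` field in the Space's README.md). If the resolved version matters at runtime, a quick check, purely illustrative and not part of this repo:

```python
# Confirm which Gradio version pip resolved once the ==4.44.0 pin was removed.
import gradio

print(gradio.__version__)
```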