Dorjzodovsuren committed on
Commit 3ae66c6 · verified · 1 Parent(s): 4639d8f

Create app.py

Files changed (1)
  1. app.py +87 -0
app.py ADDED
@@ -0,0 +1,87 @@
+ import os
+ import edge_tts
+ import tempfile
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ """
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ """
+ client = InferenceClient("google/gemma-3-27b-it", token=os.getenv("TOKEN"))
+
+ # client = InferenceClient(
+ #     provider="fireworks-ai",
+ #     api_key=os.getenv("TOKEN"),
+ # )
+
+ # Module-level chat history (a `global` statement is unnecessary at module scope)
+ history = []
+ async def respond(
+     message,
+     history=[],
+     system_message="You are DorjGPT, created by Dorjzodovsuren. You are a helpful assistant. Always reply in Mongolian and return only Mongolian text, within 50 words.",
+     max_tokens=512,
+     temperature=0.001,
+     top_p=0.95,
+ ):
+     messages = [{"role": "system", "content": system_message}]
+
+     # Rebuild the conversation from (user, assistant) pairs
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     # Stream the completion and accumulate the generated text
+     for chunk in client.chat_completion(
+         model="google/gemma-3-27b-it",
+         messages=messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = chunk.choices[0].delta.content
+         if token:
+             response += token
+
+     # completion = client.chat.completions.create(
+     #     model="deepseek-ai/DeepSeek-R1",
+     #     messages=messages,
+     #     max_tokens=500,
+     # )
+     # response = completion.choices[0].message.content
+     # print(response)
+
+     # Synthesize the Mongolian reply with Edge TTS (edge-tts produces MP3 audio)
+     communicate = edge_tts.Communicate(response, voice="mn-MN-YesuiNeural")
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+         tmp_path = tmp_file.name
+         await communicate.save(tmp_path)
+         yield tmp_path
+
+
+ with gr.Blocks(theme="gradio/monochrome", title="Dorj Assistant") as demo:
+     gr.HTML("""
+         <h1 style="text-align: center; font-size: 3em;">
+         DorjGPT
+         </h1>
+     """)
+     with gr.Column():
+         output_audio = gr.Audio(label="DorjGPT", type="filepath",
+                                 interactive=True,
+                                 visible=True,
+                                 autoplay=True,
+                                 elem_classes="audio")
+
+         user_input = gr.Textbox(label="Question", value="What is this application?")
+
+         with gr.Tab():
+             with gr.Row():
+                 translate_btn = gr.Button("Submit")
+                 translate_btn.click(fn=respond, inputs=user_input,
+                                     outputs=output_audio, api_name="translate")
+
+ if __name__ == "__main__":
+     demo.queue(max_size=30).launch()
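
Because the Submit button registers api_name="translate", the running app also exposes a /translate endpoint. Below is a minimal sketch of calling it with gradio_client; the Space id "Dorjzodovsuren/DorjGPT" is an assumption for illustration and is not part of this commit, and the app itself additionally needs gradio, edge-tts, and huggingface_hub installed.

from gradio_client import Client

# Hypothetical Space id; replace it with the real one once the Space is deployed.
client = Client("Dorjzodovsuren/DorjGPT")
audio_path = client.predict(
    "Сайн байна уу?",       # question text ("Hello" in Mongolian) for the Question textbox
    api_name="/translate",  # endpoint name registered in app.py
)
print(audio_path)  # local path to the synthesized Mongolian speech file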