Spaces: Runtime error
Commit f7a0c83 · 1 Parent(s): 13aff77
Update app.py
app.py CHANGED
@@ -2,7 +2,6 @@ from huggingface_hub import InferenceClient
 import gradio as gr
 from deep_translator import GoogleTranslator
 
-# Initialize the Hugging Face Inference Client with the specific model
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 # Function to translate Arabic text to English
@@ -13,7 +12,6 @@ def translate_to_english(text):
 def translate_to_arabic(text):
     return GoogleTranslator(source='english', target='arabic').translate(text)
 
-# Function to format the prompt with conversation history
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
@@ -22,8 +20,9 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
-
-
+def generate(
+    prompt, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+):
     # Initialize history as an empty list if it's None
     if history is None:
         history = []
@@ -45,26 +44,23 @@ def generate(prompt, history=None, temperature=0.1, max_new_tokens=256, top_p=0.
     # Translate the Arabic prompt to English
     english_prompt = translate_to_english(prompt)
 
-    # Format the prompt with the conversation history
     formatted_prompt = format_prompt(english_prompt, history)
-
+
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
-
+
     for response in stream:
         output += response.token.text
-
+
     # Translate the English response back to Arabic
     arabic_output = translate_to_arabic(output)
 
     # Update the history state with the latest exchange
     history.append((prompt, arabic_output))
 
-    # Return the response and the updated state
     return arabic_output, history
 
-
-additional_inputs = [
+additional_inputs=[
     gr.Slider(
         label="Temperature",
         value=0.9,
@@ -87,7 +83,7 @@ additional_inputs = [
         label="Top-p (nucleus sampling)",
         value=0.90,
         minimum=0.0,
-        maximum=1
+        maximum=1,
         step=0.05,
         interactive=True,
         info="Higher values sample more low-probability tokens",
@@ -103,20 +99,9 @@ additional_inputs = [
     )
 ]
 
-
-iface = gr.Interface(
+gr.ChatInterface(
     fn=generate,
-
-
-
-
-    outputs=[
-        gr.Textbox(placeholder="Generated response in Arabic"),
-        gr.State() # State output to maintain the conversation history
-    ],
-    title="Try Arabic Misteral",
-    description="Interact with an advanced AI model in Arabic. Adjust the settings below to tailor the responses. Your prompts will be translated to English, processed by the AI, and the response will be translated back to Arabic."
-)
-
-# Launch the interface
-iface.launch()
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    additional_inputs=additional_inputs,
+    title="DorjGPT interface"
+).launch(show_api=True)
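
Taken together, the surviving code wraps Mixtral in an Arabic chat loop: the user's Arabic prompt is translated to English, formatted with the Mixtral [INST] chat template, streamed through the Inference API, and the reply is translated back to Arabic. Beyond stripping stray blank lines, the commit adds the comma missing after maximum=1 (the old line was a SyntaxError) and swaps the removed gr.Interface block for gr.ChatInterface. For reference, the flow can be sketched end to end; the body of translate_to_english, the history loop inside format_prompt, and the construction of generate_kwargs all sit outside the diff context above, so those parts are assumptions modeled on the visible code and the usual Mixtral-instruct template, not the file's exact contents:

# Hedged sketch of the post-commit flow; parts marked "assumed" are not
# visible in the diff above.
from huggingface_hub import InferenceClient
from deep_translator import GoogleTranslator

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def translate_to_english(text):
    # Assumed: mirrors translate_to_arabic with source/target swapped.
    return GoogleTranslator(source='arabic', target='english').translate(text)

def translate_to_arabic(text):
    return GoogleTranslator(source='english', target='arabic').translate(text)

def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        # Assumed loop body: the standard Mixtral-instruct turn format.
        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(prompt, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    # Initialize history as an empty list if it's None
    if history is None:
        history = []
    # Assumed: generate_kwargs is built from the slider values on the
    # lines hidden between the hunks above.
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )
    english_prompt = translate_to_english(prompt)      # Arabic -> English
    formatted_prompt = format_prompt(english_prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                    stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:                            # accumulate streamed tokens
        output += response.token.text
    arabic_output = translate_to_arabic(output)        # English -> Arabic
    history.append((prompt, arabic_output))            # keep the exchange for later turns
    return arabic_output, history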
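Called outside Gradio, the new generate signature can be smoke-tested directly; a hypothetical example (both the translator and the Inference API need network access):

reply, history = generate("مرحبا، كيف حالك؟", history=None)  # "Hello, how are you?"
print(reply)    # Arabic response from Mixtral
print(history)  # [(prompt, reply)] pair carried into the next turn

One caveat: gr.ChatInterface passes (message, history) itself and expects fn to return only the reply text, so the (arabic_output, history) tuple return kept over from the old gr.Interface wiring is a mismatch this commit does not address.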