Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -12,10 +12,7 @@ MODEL = os.environ.get("MODEL_ID")
 
 PLACEHOLDER = """
 <center>
-<
-<p>Hi! I'm MisMath. A Math advisor. My model is based on mathstral-7B-v0.1. Feel free to ask your questions</p>
-<p>Mathstral 7B is a model specializing in mathematical and scientific tasks, based on Mistral 7B.</p>
-<p>mathstral-7B-v0.1 is first Mathstral model</p>
+<p>MathΣtral - I'm MisMath,Your Math advisor</p>
 </center>
 """
 
@@ -64,19 +61,23 @@ def stream_chat(
     print(f'message: {message}')
     print(f'history: {history}')
 
-    conversation
+    # Prepare the conversation list
+    conversation = [
+        {"role": "system", "content": system_prompt}
+    ]
     for prompt, answer in history:
-        conversation.append(prompt)
-        conversation.append(answer)
+        conversation.append({"role": "user", "content": prompt})
+        conversation.append({"role": "assistant", "content": answer})
 
-    conversation.append(message)
+    conversation.append({"role": "user", "content": message})
+
+    # Tokenize the conversation
+    input_ids = tokenizer(conversation, padding=True, truncation=True, return_tensors="pt").input_ids.to(model.device)
 
-    input_ids = tokenizer(conversation, return_tensors="pt").input_ids.to(model.device)
-
     streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
-
+
     generate_kwargs = dict(
-        input_ids=input_ids,
+        input_ids=input_ids,
         max_new_tokens=max_new_tokens,
         do_sample=False if temperature == 0 else True,
         top_p=top_p,
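A note on the tokenization step in the hunk above: the commit initializes the previously undefined conversation list and switches it to {"role", "content"} dicts, but it still passes that list of dicts straight to tokenizer(...). Hugging Face tokenizers expect strings there, so this call is a plausible cause of the Space's "Runtime error" status. For chat models the conversation is normally rendered through the tokenizer's chat template instead. A minimal sketch, assuming the mathstral-7B-v0.1 tokenizer ships a chat template and reusing the conversation list built above (this is not the committed code):

# Sketch, not the committed code: render the conversation with the chat template,
# then move the resulting ids to the model's device.
input_ids = tokenizer.apply_chat_template(
    conversation,                # list of {"role": ..., "content": ...} dicts built above
    add_generation_prompt=True,  # append the assistant prefix so the model continues the chat
    return_tensors="pt",
).to(model.device)

The remaining hunk below only adds a blank line near the end of stream_chat and shows the final Chatbot definition as context.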
@@ -95,6 +96,7 @@ def stream_chat(
         buffer += new_text
         yield buffer
 
+
 
 chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
 
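The diff only shows fragments of the streaming path: the TextIteratorStreamer, the first generate_kwargs entries, and the buffer/yield loop. For orientation, here is a minimal sketch of the usual transformers pattern those pieces belong to. It assumes model and tokenizer are the globals loaded earlier in app.py; the streamer= keyword and the helper name stream_reply are assumptions, since neither is visible in the hunks:

# Sketch of the standard threaded-streaming pattern (not the file's exact code).
from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(input_ids, max_new_tokens, temperature, top_p):
    # Decodes tokens incrementally; raises if generate() stalls for more than 60 s.
    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=False if temperature == 0 else True,
        top_p=top_p,
        streamer=streamer,  # assumption: the streamer has to be handed to generate()
    )
    # Run generation on a background thread so this generator can consume the streamer.
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    buffer = ""
    for new_text in streamer:  # yields decoded text chunks as they are produced
        buffer += new_text
        yield buffer           # Gradio re-renders the growing reply on every yield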
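The diff ends at the Chatbot definition, so the rest of the UI wiring is not part of this commit. One common way such a chatbot is hooked up to a streaming function like stream_chat is through gr.ChatInterface; treat the following as an illustrative sketch rather than the Space's actual layout:

# Illustrative sketch only; the Space's real layout is not shown in this diff.
import gradio as gr

chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)  # PLACEHOLDER is the HTML edited above

demo = gr.ChatInterface(
    fn=stream_chat,   # the generator shown in the hunks above
    chatbot=chatbot,
    # sliders for system_prompt, temperature, max_new_tokens, top_p would normally be
    # supplied via additional_inputs so they reach stream_chat's extra parameters
)

if __name__ == "__main__":
    demo.launch()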