Update app.py
app.py CHANGED
@@ -17,17 +17,26 @@ Assistant:
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     formatted_prompt = format_alpaca_prompt(history, message, system_message)
+    cleaned_response = ""
 
-    response = client.text_generation(
-        formatted_prompt,
-        max_new_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-    )
+    # Retry mechanism to regenerate response if it is empty
+    for _ in range(3):  # Retry up to 3 times
+        response = client.text_generation(
+            formatted_prompt,
+            max_new_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+        )
 
-    # Extract only the response
-    cleaned_response = response.split("Assistant:")[-1].strip()
+        # Extract only the response
+        cleaned_response = response.split("Assistant:")[-1].strip()
+
+        if cleaned_response:
+            break  # Exit loop if a valid response is generated
+
+    if not cleaned_response:
+        cleaned_response = "I'm sorry, I couldn't generate a response. Please try again."
 
     yield cleaned_response  # Output only the answer
 
 demo = gr.ChatInterface(
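For context, the hunk above only shows the body of respond(). Below is a minimal sketch of how the full app.py plausibly fits together, assuming client is a huggingface_hub.InferenceClient, that format_alpaca_prompt builds a prompt from pair-based chat history and ends it with "Assistant:" (so the split in respond() isolates the newest reply), and that the extra arguments come from ChatInterface's additional_inputs. The model id, the helper's exact layout, and the slider defaults are all assumptions; none of them appear in this diff.

import gradio as gr
from huggingface_hub import InferenceClient

# Assumption: the Space queries a hosted model through InferenceClient.
# The model id below is a placeholder, not taken from the diff.
client = InferenceClient("your-username/your-alpaca-model")


def format_alpaca_prompt(history, message, system_message):
    # Hypothetical helper: the diff calls format_alpaca_prompt but never
    # defines it. One plausible layout ends the prompt with "Assistant:"
    # so that response.split("Assistant:")[-1] extracts the new reply.
    lines = [system_message]
    for user_msg, bot_msg in history:  # assumes pair-based history
        lines.append(f"User: {user_msg}")
        lines.append(f"Assistant: {bot_msg}")
    lines.append(f"User: {message}")
    lines.append("Assistant:")
    return "\n".join(lines)


def respond(message, history, system_message, max_tokens, temperature, top_p):
    formatted_prompt = format_alpaca_prompt(history, message, system_message)
    cleaned_response = ""

    # Retry up to 3 times if the endpoint returns an empty string.
    for _ in range(3):
        response = client.text_generation(
            formatted_prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        cleaned_response = response.split("Assistant:")[-1].strip()
        if cleaned_response:
            break

    if not cleaned_response:
        cleaned_response = "I'm sorry, I couldn't generate a response. Please try again."

    yield cleaned_response  # Output only the answer


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Retrying on an empty cleaned_response is a blunt but effective guard against occasional blank generations from the endpoint. Note that respond() yields exactly once, so it behaves as a non-streaming ChatInterface callback; the generator form simply keeps the signature compatible with streaming handlers.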