eltorio committed:
Show warning for 500s and show better messages
app.py CHANGED
@@ -1,4 +1,3 @@
-
 # -*- coding: utf-8 -*-
 """
 app.py
@@ -25,7 +24,7 @@ base_model_path = "meta-llama/Llama-3.2-3B-Instruct"
 device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
 
 # Define the title, description, and device description for the Gradio interface
-device_desc = f"Cette I.A. fonctionne sur {device} 🚀." if device == torch.device('cuda') else f"🐢 Cette I.A. est très très lente sur {device}
+device_desc = f"Cette I.A. fonctionne sur {device} 🚀." if device == torch.device('cuda') else f"🐢 Cette I.A. est très très lente sur {device} 🐢."
 title = f"Une intelligence artificielle pour écrire des appréciations qui tourne sur {device}"
 desc = "Ce modèle vous propose une évaluation automatique."
 
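This hunk fixes the CPU branch of device_desc, which now ends cleanly with the closing 🐢.". The surrounding pattern is the usual one-time CUDA probe; a minimal self-contained sketch of it follows (the English strings are illustrative stand-ins for the French ones above):

import torch

# Probe CUDA once at startup. torch.device objects compare equal by type
# and index, so equality against torch.device('cuda') picks the GPU branch.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

device_desc = (
    f"This model runs on {device} 🚀."
    if device == torch.device('cuda')
    else f"This model is very slow on {device} 🐢."
)
print(device_desc)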
@@ -72,8 +71,11 @@ def get_conversation(trimestre: str, moyenne_1: float,moyenne_2: float,moyenne_3
 def infere(trimestre: str, moyenne_1: float,moyenne_2: float,moyenne_3: float, comportement: float, participation: float, travail: float) -> str:
     if not torch.cuda.is_available():
         gr.Warning("""No GPU available <br>
+        The answer will appear in around 10 minutes !<br>
+        But it takes only a few seconds on a decent GPU<br>
         Open a message in the <a href='https://huggingface.co/spaces/eltorio/Llama-3.2-3B-appreciation/discussions'>Community Discussion</a>
-        """)
+        """,
+        duration=500)
     messages = get_conversation(trimestre, moyenne_1, moyenne_2, moyenne_3, comportement, participation, travail)
     # Tokenize the input
     inputs = tokenizer.apply_chat_template(
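This hunk is the commit's headline change: gr.Warning gains a duration=500 argument, which keeps the toast on screen for 500 seconds instead of Gradio's short default, and the message now explains the expected CPU latency. A reduced sketch of the same pattern, assuming a Gradio 4.x release where gr.Warning accepts duration (the slow_predict handler and its text are placeholders, not from the space):

import gradio as gr

def slow_predict(prompt: str) -> str:
    # gr.Warning shows a toast only when called inside an event handler;
    # duration is in seconds (None would keep the toast up until dismissed).
    gr.Warning("No GPU available<br>The answer may take several minutes.",
               duration=500)
    return prompt.upper()  # stand-in for the real inference call

demo = gr.Interface(fn=slow_predict, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()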
@@ -97,12 +99,12 @@ autoeval = gr.Interface(fn=infere, inputs=[
     gr.Radio(
         ["1", "2", "3"], value="1", label="trimestre", info="Trimestre"
     ),
-    gr.Slider(0, 20,label="
-    gr.Slider(0, 20,label="
-    gr.Slider(0, 20,label="
-    gr.Slider(0, 10, value=5, label="
-    gr.Slider(0, 10, value=5, label="
-    gr.Slider(0, 10, value=5, label="
+    gr.Slider(0, 20,label="Moyenne au premier trimestre", value=10, info="Moyenne trimestre 1"),
+    gr.Slider(0, 20,label="Moyenne au second trimestre", value=10, info="Moyenne trimestre 2"),
+    gr.Slider(0, 20,label="Moyenne au troisième trimestre", value=10, info="Moyenne trimestre 3"),
+    gr.Slider(0, 10, value=5, label="Comportement", info="Comportement (1 à 10)"),
+    gr.Slider(0, 10, value=5, label="Participation", info="Participation (1 à 10)"),
+    gr.Slider(0, 10, value=5, label="Travail", info="Travail (1 à 10)"),
 
     ], outputs="text", title=title,
     description=desc, article=long_desc)
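The final hunk rewrites the six sliders (their previous labels are truncated in the diff): each one now carries a default value, a descriptive French label, and an info caption, so the 0-20 grade scale and the 1-10 behaviour scales are self-explanatory in the form. A stripped-down sketch of the same input wiring, with a hypothetical echo callback standing in for infere:

import gradio as gr

def echo(moyenne: float, comportement: float) -> str:
    # Placeholder for the real inference function.
    return f"moyenne={moyenne}, comportement={comportement}"

demo = gr.Interface(
    fn=echo,
    inputs=[
        # The first two positional arguments are minimum and maximum; value
        # sets the default position, and info adds a caption under the label.
        gr.Slider(0, 20, value=10, label="Moyenne", info="Moyenne trimestrielle"),
        gr.Slider(0, 10, value=5, label="Comportement", info="Comportement (1 à 10)"),
    ],
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()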
|