Spaces:
Runtime error
Runtime error
Commit
·
324a736
1
Parent(s):
e57684f
Update app.py
Browse files — Added a theme and temperature
app.py
CHANGED
|
@@ -33,10 +33,10 @@ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
| 33 |
# Definimos el eos token para el modelo
|
| 34 |
eos_token = tokenizer("<|im_end|>",add_special_tokens=False)["input_ids"][0]
|
| 35 |
|
| 36 |
-
def generate_inference(instruction, input):
|
| 37 |
prompt = pipe.tokenizer.apply_chat_template([{"role": "user",
|
| 38 |
"content": f"{instruction}/n{input}"}], tokenize=False, add_generation_prompt=True)
|
| 39 |
-
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, num_beams=1, temperature=
|
| 40 |
max_time= 300, eos_token_id=eos_token)
|
| 41 |
return outputs[0]['generated_text'][len(prompt):].strip()
|
| 42 |
|
|
@@ -75,12 +75,15 @@ def clean_output():
|
|
| 75 |
def clean_all():
|
| 76 |
return "¿Quién es un solicitante de asilo/ protección internacional?","", "" #inp, cont, out
|
| 77 |
|
| 78 |
-
with gr.Blocks(theme="
|
| 79 |
title="Question Answering - Legal Refugiados v1.0"
|
| 80 |
|
| 81 |
gr.HTML("""
|
|
|
|
|
|
|
|
|
|
| 82 |
<h1 align="center">Question Answering - Legal Refugiados v1.0</h1>
|
| 83 |
-
<h2 align="center">Apoyo para
|
| 84 |
""")
|
| 85 |
|
| 86 |
inp = gr.Textbox(label="🌐 Pregunta a resolver",
|
|
@@ -88,31 +91,37 @@ with gr.Blocks(theme="ParityError/LimeFace", title="QA Legal Refugiados") as dem
|
|
| 88 |
value="¿Quién es un solicitante de asilo/ protección internacional?",
|
| 89 |
interactive=True,
|
| 90 |
)
|
| 91 |
-
context=gr.Textbox(label="Contexto",
|
| 92 |
info="Introduce el contexto de la pregunta",
|
| 93 |
placeholder="Contexto de la pregunta",
|
| 94 |
-
interactive=
|
| 95 |
)
|
| 96 |
|
| 97 |
-
out = gr.Textbox(label="
|
| 98 |
interactive=False,
|
| 99 |
placeholder="Aquí aparecerá la respuesta o información solicitada",
|
| 100 |
)
|
| 101 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
inp,
|
| 103 |
context,
|
| 104 |
fn=get_context,
|
| 105 |
run_on_click= True,
|
| 106 |
-
label= "Ejemplos de cuestiones. Se les asignara el contexto automáticamente")
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
|
|
|
|
|
|
| 115 |
|
| 116 |
#demo.queue(max_size=None)
|
| 117 |
-
demo.launch(debug=True, share=False)
|
| 118 |
|
|
|
|
| 33 |
# Resolve the chat end-of-turn marker "<|im_end|>" to its token id so the
# generation pipeline knows where to stop.
_im_end_ids = tokenizer("<|im_end|>", add_special_tokens=False)["input_ids"]
eos_token = _im_end_ids[0]
|
| 35 |
|
| 36 |
+
def generate_inference(instruction, input, temperature):
    """Generate a model answer for *instruction* using *input* as context.

    Args:
        instruction: The user question/instruction text.
        input: Supporting context appended after the instruction.
            (Name kept for interface compatibility even though it shadows
            the builtin ``input``.)
        temperature: Sampling temperature from the UI slider; cast to float.

    Returns:
        The generated answer with the echoed prompt prefix stripped.
    """
    # Build a chat-formatted prompt. BUGFIX: the separator was the literal
    # two characters "/n", which leaked into the prompt; "\n" is intended.
    prompt = pipe.tokenizer.apply_chat_template(
        [{"role": "user", "content": f"{instruction}\n{input}"}],
        tokenize=False,
        add_generation_prompt=True,
    )
    outputs = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        num_beams=1,
        temperature=float(temperature),
        top_k=50,
        top_p=0.95,
        max_time=300,  # hard cap (seconds) on generation time
        eos_token_id=eos_token,
    )
    # The pipeline echoes the prompt; return only the newly generated text.
    return outputs[0]['generated_text'][len(prompt):].strip()
|
| 42 |
|
|
|
|
| 75 |
def clean_all():
    """Reset every widget to its default: question, context and output."""
    default_question = "¿Quién es un solicitante de asilo/ protección internacional?"
    # Order matches the gradio outputs wiring: (inp, context, out).
    return default_question, "", ""
|
| 77 |
|
| 78 |
+
# ---------------------------------------------------------------------------
# Gradio UI: question/context inputs, example picker, temperature slider and
# action buttons wired to the inference / cleanup callbacks defined above.
# ---------------------------------------------------------------------------
with gr.Blocks(theme="snehilsanyal/scikit-learn", title="QA Legal Refugiados") as demo:
    # NOTE(review): the original also assigned an unused local
    # ``title = "Question Answering - Legal Refugiados v1.0"`` here; the
    # window title is already set via the Blocks kwarg, so it was removed.
    gr.HTML("""
        <p align="center">
        <img src="/file=images/markus-winkler-Je1MDuITTF4-unsplash.jpg" width="300" height="150">
        </p>
        <h1 align="center">Question Answering - Legal Refugiados v1.0</h1>
        <h2 align="center">Apoyo para informar y responder a consultas acerca de procedimientos y leyes de actuación y protección a asiliados y refugiados.</h2>
    """)

    inp = gr.Textbox(
        label="🌐 Pregunta a resolver",
        value="¿Quién es un solicitante de asilo/ protección internacional?",
        interactive=True,
    )
    # NOTE(review): reconstructed from a diff view; the source may have had an
    # additional elided kwarg (e.g. ``info=``) on ``inp`` — confirm upstream.
    context = gr.Textbox(
        label="📰 Contexto",
        info="Introduce el contexto de la pregunta",
        placeholder="Contexto de la pregunta",
        interactive=True,
    )
    out = gr.Textbox(
        label="🚀 Respuesta e información",
        interactive=False,
        placeholder="Aquí aparecerá la respuesta o información solicitada",
    )
    # Editing the question invalidates any previously generated answer.
    inp.change(clean_output, inputs=[], outputs=out)

    with gr.Row():
        with gr.Column(scale=3, min_width=600):
            # Clicking an example fills the question and, via get_context,
            # its matching context.
            exam_box = gr.Examples(
                examples,
                inp,
                context,
                fn=get_context,
                run_on_click=True,
                label="📌 Ejemplos de cuestiones. Se les asignara el contexto automáticamente",
            )

        with gr.Column(scale=1, min_width=200):
            temperature = gr.Slider(minimum=0.2, maximum=1, step=0.1, value=0.3,
                                    label="🌡 Temperature", visible=True)
            inference_btn = gr.Button("Responder")
            inference_btn.click(fn=generate_inference,
                                inputs=[inp, context, temperature], outputs=out)
            clean_context_btn = gr.Button("Limpiar contexto")
            clean_context_btn.click(fn=clean_output, inputs=[], outputs=context)
            clean_btn = gr.Button("Limpiar todo")
            clean_btn.click(fn=clean_all, inputs=[], outputs=[inp, context, out])

#demo.queue(max_size=None)
# allowed_paths lets Gradio serve the header image from the local filesystem.
demo.launch(debug=True, share=False,
            allowed_paths=["images/markus-winkler-Je1MDuITTF4-unsplash.jpg"])