M4sterStudy committed on
Commit
4bad5f1
·
verified ·
1 Parent(s): 1a5e155

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -13
app.py CHANGED
@@ -1,32 +1,30 @@
import os
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr
import torch

# Authenticate against the Hugging Face Hub with the token stored as a secret.
hf_token = os.getenv("HF_API_TOKEN")
login(hf_token)

# Load the model and its tokenizer.
model_name = "mrm8488/t5-base-finetuned-spanish"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def generate_text(input_text):
    """Generate Spanish text from *input_text* using the T5 seq2seq model.

    The prompt is truncated to 512 tokens; generation uses beam search
    (4 beams, early stopping) with a 200-token cap.
    """
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512)
    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=200, num_beams=4, early_stopping=True)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text


# Build the Gradio interface.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Generador de Texto en Español",
    description="Genera texto en español utilizando un modelo de lenguaje preentrenado."
)

iface.launch()
 
import os
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Authenticate against the Hugging Face Hub with the token stored as a secret.
# Fail fast with a clear message instead of letting login() raise an opaque
# error when the HF_API_TOKEN secret is missing.
hf_token = os.getenv("HF_API_TOKEN")
if not hf_token:
    raise RuntimeError("HF_API_TOKEN environment variable is not set")
login(hf_token)

# Load the Spanish GPT-2 model and its tokenizer.
model_name = "DeepESP/gpt2-spanish"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def chat_with_gpt2_spanish(input_text):
    """Generate a Spanish continuation of *input_text* with GPT-2.

    Args:
        input_text: Prompt text entered by the user (truncated to 512 tokens).

    Returns:
        The decoded generation. NOTE(review): as a causal LM, the output
        includes the original prompt followed by the continuation.
    """
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512)
    # GPT-2 tokenizers define no pad token; pass eos_token_id explicitly so
    # beam-search generate() pads deterministically instead of warning.
    outputs = model.generate(
        **inputs,
        max_length=200,
        num_beams=4,
        early_stopping=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


# Build the Gradio interface.
iface = gr.Interface(
    fn=chat_with_gpt2_spanish,
    inputs="text",
    outputs="text",
    title="Chat con GPT-2 en Español",
    description="Interfaz simple para comunicarte con el modelo GPT-2 en español."
)

iface.launch()