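"""Streamlit demo: switch between Hugging Face causal LMs from a sidebar and
generate text seeded with per-model style prompts loaded from prompt.yml."""
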
import streamlit as st
import yaml
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

class ModelManager:
    def __init__(self, model_name="microsoft/Phi-4-mini-instruct"):
        # Model registry: display names map to Hugging Face model identifiers
        self.models = {
            "microsoft/Phi-4-mini-instruct": "microsoft/Phi-4-mini-instruct",
            "microsoft/Phi-4-multimodal": "microsoft/Phi-4-multimodal",
            "meta-llama/Llama-3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct"
        }
        self.current_model_name = model_name
        self.tokenizer = None
        self.model = None
        self.load_model(model_name)
    
    def load_model(self, model_name):
        self.current_model_name = model_name
        # Fall back to treating the name as a Hugging Face id directly if it
        # is not in the registry (the registry maps names to themselves here).
        model_path = self.models.get(model_name, model_name)
        st.info(f"Loading model: {model_name} ...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        # torch_dtype="auto" keeps the checkpoint's native precision instead
        # of upcasting everything to float32.
        self.model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype="auto")
    
    def generate(self, prompt, max_new_tokens=50, temperature=0.7):
        inputs = self.tokenizer(prompt, return_tensors="pt")
        # temperature only takes effect with do_sample=True; max_new_tokens
        # bounds the continuation rather than prompt plus continuation.
        outputs = self.model.generate(**inputs, max_new_tokens=max_new_tokens,
                                      do_sample=True, temperature=temperature)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
    
    def switch_model(self, model_name):
        if model_name in self.models:
            self.load_model(model_name)
        else:
            raise ValueError(f"Model {model_name} is not available.")

@st.cache_data
def load_prompts():
    with open("prompt.yml", "r", encoding="utf-8") as f:
        prompts = yaml.safe_load(f)
    return prompts
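
# Assumed layout of prompt.yml (hypothetical example, not shown in this repo):
# top-level keys match the model identifiers offered in the sidebar, plus an
# optional "default_prompt" fallback, each mapping to a style prompt string.
#
#   default_prompt: "You are a helpful assistant."
#   microsoft/Phi-4-mini-instruct: "Answer briefly and precisely."
#   meta-llama/Llama-3.3-70B-Instruct: "Reason step by step, then answer."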

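# Sketch, assuming Streamlit >= 1.18: cache the loaded model across reruns.
# Streamlit re-executes the whole script on every widget interaction, so an
# uncached ModelManager would reload the weights each time.
@st.cache_resource
def get_model_manager(model_name):
    return ModelManager(model_name=model_name)
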
def main():
    st.title("Transformers Model Switcher")
    
    # Load the prompt configuration
    prompts_config = load_prompts()
    
    # Model selection in the sidebar; "default_prompt" is a fallback style
    # prompt, not a model, so it is filtered out of the choices.
    st.sidebar.title("Model Selection")
    model_choice = st.sidebar.selectbox(
        "Select a model",
        [k for k in prompts_config if k != "default_prompt"],
    )
    
    # Get (or reuse) the cached model manager for the selected model
    model_manager = get_model_manager(model_choice)
    
    # Look up the style prompt for the selected model, falling back to default
    style_prompt = prompts_config.get(model_choice, prompts_config.get("default_prompt", ""))
    
    st.write(f"**Model in use:** {model_choice}")
    
    # Text area for the prompt, pre-filled with the model's style prompt
    user_prompt = st.text_area("Enter your prompt:", value=style_prompt)
    
    max_new_tokens = st.slider("Max new tokens", min_value=10, max_value=200, value=50)
    temperature = st.slider("Temperature", min_value=0.1, max_value=1.0, value=0.7)
    
    if st.button("Generate response"):
        result = model_manager.generate(user_prompt, max_new_tokens=max_new_tokens,
                                        temperature=temperature)
        st.text_area("Output", value=result, height=200)

if __name__ == "__main__":
    main()
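
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py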