import gradio as gr
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
# The custom architecture lives in a local module; SnowflakeConfig is assumed
# to be exported alongside Snowflake4CausalLM (adjust if the name differs).
from modeling_snowflake import SnowflakeConfig, Snowflake4CausalLM

# --- Load Model and Tokenizer from Hugging Face Hub ---
MODEL_NAME = "Snowflake-G0-stable"  # placeholder: set to the actual Hub repo id

# Register the custom architecture so AutoConfig/AutoModelForCausalLM can
# resolve the "SnowflakeCore" model type. Registering the base PretrainedConfig
# would fail; the config class must itself declare model_type = "SnowflakeCore".
AutoConfig.register("SnowflakeCore", SnowflakeConfig)
AutoModelForCausalLM.register(SnowflakeConfig, Snowflake4CausalLM)

# Load tokenizer; fall back to the EOS token for padding if none is defined,
# since tokenizing with padding=True requires a pad token
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Load model
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16  # Use half precision for memory efficiency
)
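# Inference mode: disable dropout and move weights to GPU when available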
model.eval()
model.to("cuda" if torch.cuda.is_available() else "cpu")

# --- Inference Function ---
def generate_text(prompt, max_new_tokens=50):
    """
    Generate text based on the input prompt using the trained model.
    """
    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True, max_length=384)
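    # Move tensors to the model's device so inputs and weights are co-located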
    input_ids = inputs["input_ids"].to(model.device)
    attention_mask = inputs["attention_mask"].to(model.device)

    # Generate output tokens
    with torch.no_grad():
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_new_tokens=max_new_tokens,  # cap only the newly generated tokens; max_length would also count the prompt
            pad_token_id=tokenizer.eos_token_id  # Use EOS token for padding
        )

    # Decode the generated tokens
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
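# Example usage: generate_text("Hello, how are you?") returns the decoded
# prompt followed by up to 50 newly generated tokens.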

# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# Snowflake-G0-stable Language Model")
    gr.Markdown("This is an enhanced transformer language model trained on the DialogMLM-50K dataset. Try it out below!")

    with gr.Row():
        input_prompt = gr.Textbox(label="Input Prompt", placeholder="Enter your text here...")
        output_text = gr.Textbox(label="Generated Text")

    submit_button = gr.Button("Generate")

    # Gradio passes the textbox value as the sole argument; max_new_tokens keeps its default
    submit_button.click(generate_text, inputs=input_prompt, outputs=output_text)

# Launch the app
demo.launch()