import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Load the text generation pipeline
text_gen_pipeline = load_model()

# Streamlit app interface
st.title("Text Generation with Meta-Llama-3-8B")
st.write("Enter some text and click the button to generate a continuation.")

# User input
user_input = st.text_area("Input Text", "Once upon a time")

# Generate text on button click
if st.button("Generate Text"):
    with st.spinner("Generating..."):
        generated_text = text_gen_pipeline(user_input, max_length=100, num_return_sequences=1)
        st.write(generated_text[0]['generated_text'])
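Two practical notes on the snippet above. First, meta-llama/Meta-Llama-3-8B is a gated model on the Hugging Face Hub, so the environment running the app needs an access token that has been granted access to it. Second, Streamlit reruns the whole script on every interaction, so caching the pipeline avoids reloading the 8B model each time. The sketch below is one way to handle both; reading the token from an HF_TOKEN environment variable (or Space secret) is an assumed convention, not part of the original snippet.

import os
import streamlit as st
from huggingface_hub import login
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "meta-llama/Meta-Llama-3-8B"

@st.cache_resource  # cache the loaded pipeline across Streamlit reruns
def load_model():
    # Authenticate for the gated model; HF_TOKEN is an assumed secret/env var
    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

If longer continuations are wanted, passing max_new_tokens to the pipeline is usually clearer than max_length, since max_length also counts the prompt tokens.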