Spaces:
Sleeping
Sleeping
File size: 940 Bytes
Commit: 7df1f75
import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Load the model and tokenizer
@st.cache_resource
def load_model():
    """Build and return a text-generation pipeline for Meta-Llama-3-8B.

    Loads the tokenizer and causal-LM weights from the Hugging Face Hub.
    Decorated with @st.cache_resource so the (multi-GB) model is loaded
    once per server process and reused across Streamlit reruns.

    Returns:
        transformers.Pipeline: a "text-generation" pipeline wrapping the
        loaded model and tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
# Load the text generation pipeline (cached by @st.cache_resource, so this
# is cheap on every rerun after the first).
text_gen_pipeline = load_model()

# Streamlit app interface
st.title("Text Generation with Meta-Llama-3-8B")
st.write("Enter some text and click the button to generate a continuation.")

# User input (prompt); second argument is the default shown in the box.
user_input = st.text_area("Input Text", "Once upon a time")

# Generate text on button click
if st.button("Generate Text"):
    with st.spinner("Generating..."):
        # max_length=100 caps prompt + continuation tokens combined;
        # num_return_sequences=1 yields a single candidate.
        generated_text = text_gen_pipeline(user_input, max_length=100, num_return_sequences=1)
        # Pipeline returns a list of dicts; show the sole generated string.
        st.write(generated_text[0]['generated_text'])
|