import streamlit as st
from datasets import load_dataset
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig

# Reference dataset (loaded here but not used further in this snippet)
huggingface_dataset_name = "dshihk/llm-generated-essay"
dataset = load_dataset(huggingface_dataset_name)

# Load the pretrained FLAN-T5 model and its tokenizer
model_name = 'google/flan-t5-base'
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

# Get the topic from the user
topic = st.text_area("Enter your desired Topic of Blog")

if topic:
    # Generation configuration
    # generation_config = GenerationConfig(max_new_tokens=1000, do_sample=True, temperature=0.7)
    generation_config = GenerationConfig(max_new_tokens=50)

    # Encode the input topic as model-ready tensors
    inputs_encoded = tokenizer(topic, return_tensors='pt')

    # Generate the model output token IDs
    model_output = model.generate(inputs_encoded["input_ids"], generation_config=generation_config)[0]

    # Decode the output token IDs back into text
    zero_output = tokenizer.decode(model_output, skip_special_tokens=True)

    st.write(zero_output)
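
# As written above, the model and tokenizer are re-loaded from the Hub on every
# Streamlit rerun (i.e., on each user interaction). A minimal sketch of one way
# to avoid that, assuming Streamlit >= 1.18 where st.cache_resource is
# available; load_cached_model is a hypothetical helper name, not part of the
# original script.
@st.cache_resource
def load_cached_model(name: str):
    # Runs once per process; later reruns reuse the cached objects.
    cached_model = AutoModelForSeq2SeqLM.from_pretrained(name)
    cached_tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True)
    return cached_model, cached_tokenizer

# Usage (would replace the two from_pretrained calls above):
# model, tokenizer = load_cached_model('google/flan-t5-base')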