---
license: mit
base_model:
- paust/pko-t5-base
pipeline_tag: text2text-generation
---

## Based on paust/pko-t5-base

Since this model is based on `paust/pko-t5-base`, you need to load that tokenizer alongside the model:

```python
from transformers import T5TokenizerFast, T5ForConditionalGeneration

tokenizer = T5TokenizerFast.from_pretrained("paust/pko-t5-base")
model = T5ForConditionalGeneration.from_pretrained("emotionanalysis/diaryempathizer-t5-ko")
```

## Usage

```python
import torch
from transformers import T5TokenizerFast, T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("emotionanalysis/diaryempathizer-t5-ko")

# Move the model to GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

tokenizer = T5TokenizerFast.from_pretrained("paust/pko-t5-base")

# Put the diary entry you want an empathetic comment for here
input_text = """
"""

inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding="max_length")
inputs = {key: value.to(device) for key, value in inputs.items()}

# Generate an empathetic comment with beam search
outputs = model.generate(input_ids=inputs["input_ids"], max_length=128, num_beams=4, early_stopping=True)

generated_comment = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_comment)
```
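For quick experimentation, the same checkpoint can also be driven through the high-level `pipeline` API instead of calling `generate` directly. This is a minimal sketch under the same model/tokenizer pairing shown above; the Korean diary entry used as input is a hypothetical example, not part of the model card.

```python
# Minimal sketch: text2text-generation via the pipeline API.
# Assumes the same checkpoint and tokenizer as above; the diary
# entry below is a hypothetical example input.
import torch
from transformers import pipeline, T5TokenizerFast, T5ForConditionalGeneration

tokenizer = T5TokenizerFast.from_pretrained("paust/pko-t5-base")
model = T5ForConditionalGeneration.from_pretrained("emotionanalysis/diaryempathizer-t5-ko")

generator = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,  # GPU if available, else CPU
)

diary_entry = "오늘은 하루 종일 비가 와서 기분이 가라앉았다."  # "It rained all day and my mood sank." (hypothetical)
result = generator(diary_entry, max_length=128, num_beams=4, early_stopping=True)
print(result[0]["generated_text"])
```

The pipeline handles tokenization, device placement of inputs, and decoding in one call, which is convenient for demos; the explicit `generate` loop above remains preferable when you need fine-grained control over padding and truncation.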