import gradio as gr
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

# Placeholders; the actual model and tokenizer are loaded inside genQuestion()
model = None
tok = None
# Example inputs for each model; each row is [model_choice, context],
# matching the order of the Interface inputs below
examples = [
    ["interview-question-remake", "I have a cat named dolche and he's not very friendly with strangers. I've had him for 9 years now and it has been a pleasure to see him grow closer to us every year."],
    ["interview-length-tagged", "Today's weather was really nice."],
    ["reverse-interview-question", "There are so many incredible musicians out there and so many really compelling big hits this year that it makes for a really interesting way to recap some of those big events."],
]
# Per-model descriptions (kept as a stub; currently unused)
# descriptions = "Interview question remake is a model that..."
# Takes the selected model name and an input context string
def genQuestion(model_choice, context):
    # global descriptions
    if model_choice == "interview-question-remake":
        model = BartForConditionalGeneration.from_pretrained("hyechanjun/interview-question-remake")
        tok = BartTokenizer.from_pretrained("hyechanjun/interview-question-remake")
        # descriptions = "Interview question remake is a model that..."
    elif model_choice == "interview-length-tagged":
        model = BartForConditionalGeneration.from_pretrained("hyechanjun/interview-length-tagged")
        tok = BartTokenizer.from_pretrained("hyechanjun/interview-length-tagged")
        # descriptions = "Interview question tagged is a model that..."
    elif model_choice == "reverse-interview-question":
        model = BartForConditionalGeneration.from_pretrained("hyechanjun/reverse-interview-question")
        tok = BartTokenizer.from_pretrained("hyechanjun/reverse-interview-question")
        # descriptions = "Reverse interview question is a model that..."
    else:
        # Guard against an unknown choice, which would otherwise raise
        # an UnboundLocalError when model/tok are used below
        raise ValueError(f"Unknown model choice: {model_choice}")

    # Tokenize the context and generate 4 candidate questions with diverse
    # beam search (diversity_penalty requires num_beam_groups > 1)
    inputs = tok(context, return_tensors="pt")
    output = model.generate(
        inputs["input_ids"],
        num_beams=4,
        max_length=64,
        min_length=9,
        num_return_sequences=4,
        diversity_penalty=1.0,
        num_beam_groups=4,
    )
    # Decode each returned sequence; one candidate question per line
    decoded = [tok.decode(beam, skip_special_tokens=True, clean_up_tokenization_spaces=False) for beam in output]
    return "\n".join(decoded) + "\n"
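# Note: genQuestion() reloads weights from the Hugging Face Hub on every
# request. A minimal caching sketch (an assumption, not part of the original
# app) would keep loaded (model, tokenizer) pairs in a dict keyed by the
# dropdown name, which matches the checkpoint name under hyechanjun/:
#
# _cache = {}
# def load_model(name):
#     if name not in _cache:
#         _cache[name] = (
#             BartForConditionalGeneration.from_pretrained(f"hyechanjun/{name}"),
#             BartTokenizer.from_pretrained(f"hyechanjun/{name}"),
#         )
#     return _cache[name]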
iface = gr.Interface(
    fn=genQuestion,
    inputs=[
        # gr.inputs.Dropdown was deprecated and later removed; use gr.Dropdown
        gr.Dropdown(choices=["interview-question-remake", "interview-length-tagged", "reverse-interview-question"]),
        "text",
    ],
    examples=examples,
    outputs="text",
)
iface.launch()
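# To run locally: `python app.py`, then open the local URL Gradio prints
# (http://127.0.0.1:7860 by default).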