import gradio as gr
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the tokenizer and classifier weights from the HuggingFace Hub.
# NOTE: this runs at import time and downloads/caches the model files on first run.
tokenizer = AutoTokenizer.from_pretrained("FuriouslyAsleep/unhappyZebra100")

# Sequence-classification head; from the output message in greet() below,
# presumably a 2-class (False/True) classifier — verify against the model card.
model = AutoModelForSequenceClassification.from_pretrained("FuriouslyAsleep/unhappyZebra100")

def greet(Doc_Passage_To_Test):
    """Classify a text passage and return its class probabilities as a string.

    Args:
        Doc_Passage_To_Test: Raw text from the Gradio textbox. (The parameter
            name is kept as-is because Gradio uses it for the input label.)

    Returns:
        A string listing the softmax probabilities over the two classes
        (False prob first, then True prob).
    """
    inputs = tokenizer(Doc_Passage_To_Test, return_tensors="pt")
    # Inference only: disable autograd so no gradient graph is built per
    # request (saves memory) and the tensor repr carries no grad_fn noise.
    with torch.no_grad():
        outputs = model(**inputs)
    # Logits -> probabilities over the last (class) dimension.
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
    return "Probabilities are listed here (False prob, then True prob): " + str(predictions)

# Build the web UI: "text" shortcuts expand to a single textbox in and out.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# Starts the local Gradio server (blocking) when the script is run.
iface.launch()