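"""Gradio Space: fill-mask demo for atlasia/xlm-roberta-large-ft-alatlas.

A Hugging Face token is read from the HF_KEY environment variable and passed
to the pipeline; inference runs on GPU via the `@spaces.GPU` decorator.
"""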
import os

import gradio as gr
import spaces
import torch
from transformers import pipeline

# Hugging Face access token used to download the model
key = os.environ["HF_KEY"]
def load_model():
    """Load the fill-mask pipeline on GPU (device 0)."""
    return pipeline(
        task="fill-mask", model="atlasia/xlm-roberta-large-ft-alatlas", token=key, device=0
    )

print("[INFO] loading model ...")
pipe = load_model()
print("[INFO] model loaded")
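# The fill-mask pipeline returns a list of candidate fills, each a dict with
# "score", "token", "token_str", and "sequence" keys; predict() turns the top
# candidates into a {token: probability in %} mapping for the gr.Label output.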
@spaces.GPU
def predict(text):
    """Run fill-mask on `text` and return {predicted token: score in %}."""
    outputs = pipe(text)
    scores = [x["score"] for x in outputs]
    tokens = [x["token_str"] for x in outputs]
    # Convert scores to percentages keyed by the predicted token
    return {label: float(prob) * 100 for label, prob in zip(tokens, scores)}
# Build the Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Input text box
            input_text = gr.Textbox(
                label="Input",
                placeholder="Enter text here..."
            )
            # Button row
            with gr.Row():
                clear_btn = gr.Button("Clear")
                submit_btn = gr.Button("Submit", variant="primary")
            # Examples section (the XLM-RoBERTa tokenizer uses <mask> rather than [MASK])
            gr.Examples(
                examples=["Hugging Face is the AI community, working together, to <mask> the future."],
                inputs=input_text
            )
        with gr.Column():
            # Output section heading
            gr.Label("Classification")
            # Output probabilities
            output_labels = gr.Label(
                label="Classification Results",
                show_label=False
            )
    # Button actions (must be wired inside the Blocks context)
    submit_btn.click(
        predict,
        inputs=input_text,
        outputs=output_labels
    )
    clear_btn.click(
        lambda: "",
        outputs=input_text
    )
# Launch the app
demo.launch()