"""Gradio Space: fill-mask demo for the Moroccan Darija XLM-RoBERTa model.

Loads the pipeline once at startup, then serves a simple two-column UI:
an RTL text input with examples on the left, label probabilities on the right.
"""
import os

import gradio as gr
import torch
from transformers import pipeline
import spaces

# Use .get so a missing HF_KEY secret does not crash the Space with a
# KeyError at import time — the model is public, so token=None still works.
key = os.environ.get("HF_KEY")


def load_model():
    """Build the fill-mask pipeline on GPU device 0.

    Returns:
        A transformers fill-mask pipeline for
        BounharAbdelaziz/XLM-RoBERTa-Morocco.
    """
    return pipeline(
        task="fill-mask",
        model="BounharAbdelaziz/XLM-RoBERTa-Morocco",
        token=key,
        device=0,
    )


print("[INFO] load model ...")
pipe = load_model()
print("[INFO] model loaded")


@spaces.GPU
def predict(text):
    """Run fill-mask on *text* and map each candidate token to its score.

    Args:
        text: Input sentence containing the model's mask token.

    Returns:
        dict[str, float] of predicted token string -> probability, in the
        shape gr.Label expects.
    """
    outputs = pipe(text)
    return {x["token_str"]: float(x["score"]) for x in outputs}


# --- Gradio interface -----------------------------------------------------
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Input text box (Darija/Arabic is right-to-left)
            input_text = gr.Textbox(
                label="Input",
                placeholder="Enter text here...",
                rtl=True,
            )
            # Button row
            with gr.Row():
                clear_btn = gr.Button("Clear")
                submit_btn = gr.Button("Submit", variant="primary")
            # Examples section
            gr.Examples(
                examples=[
                    "العاصمة د هي الرباط",
                    "المغرب زوين",
                    "انا سميتي مريم، و كنسكن ف العاصمة دفلسطين",
                ],
                inputs=input_text,
            )
        with gr.Column():
            # Output probabilities
            output_labels = gr.Label(
                label="Prediction Results",
                show_label=False,
            )

    # Wire the buttons: Submit runs the model, Clear empties the input.
    submit_btn.click(
        predict,
        inputs=input_text,
        outputs=output_labels,
    )
    clear_btn.click(
        lambda: "",
        outputs=input_text,
    )

# Launch the app
demo.launch()