# Hugging Face Space — Moroccan-Arabic (Darija) fill-mask demo, running on ZeroGPU.
import gradio as gr
import torch
from transformers import pipeline
import os
import spaces
import functools
# Hugging Face access token used to download the model.
# NOTE(review): the original `os.environ["HF_KEY"]` raised KeyError at import
# when the secret was missing; `.get()` lets the app start and the pipeline
# fall back to anonymous access (works if the model is public) — confirm the
# model's gating before relying on this.
key = os.environ.get("HF_KEY")
def load_model():
    """Load the Moroccan-Arabic XLM-RoBERTa fill-mask pipeline.

    Returns:
        A `transformers` fill-mask pipeline for
        ``BounharAbdelaziz/XLM-RoBERTa-Morocco``.

    Uses GPU device 0 with half precision when CUDA is available and falls
    back to CPU with float32 otherwise, so the app also starts on CPU-only
    hardware (the original hard-coded ``device=0`` crashed without a GPU).
    NOTE(review): on ZeroGPU the GPU is only attached inside ``@spaces.GPU``
    calls — verify what ``torch.cuda.is_available()`` reports at startup there.
    """
    print("[INFO] Loading model... This may take a minute on Spaces")
    use_cuda = torch.cuda.is_available()
    pipe = pipeline(
        task="fill-mask",
        model="BounharAbdelaziz/XLM-RoBERTa-Morocco",
        token=key,
        device=0 if use_cuda else -1,  # -1 = CPU for transformers pipelines
        # fp16 only makes sense on GPU; many CPU ops don't support it
        torch_dtype=torch.float16 if use_cuda else torch.float32,
    )
    print("[INFO] Model loaded successfully!")
    return pipe
# Load the model once at module import so every request reuses the same
# pipeline instance instead of reloading per call.
print("[INFO] load model ...")
pipe=load_model()
print("[INFO] model loaded")
# Remove the @gr.cache decorator since it's not available
@spaces.GPU
def predict(text):
    """Fill the <mask> in `text` and map each candidate token to its probability.

    Returns a dict of ``{token_str: score}`` suitable for a ``gr.Label`` output.
    """
    predictions = pipe(text)
    return {item["token_str"]: float(item["score"]) for item in predictions}
# Create Gradio interface
with gr.Blocks() as demo:
    # The output component is created unrendered so that gr.Examples (which
    # lives in the left column, before the right column exists) can reference
    # it: cache_examples=True requires both `fn` and `outputs`, otherwise
    # Gradio raises at startup. It is rendered in the right column below.
    output_labels = gr.Label(
        label="Prediction Results",
        show_label=False,
        num_top_classes=5,  # limit the display to the top 5 predictions
        render=False,
    )
    with gr.Row():
        with gr.Column():
            # Input text box (RTL for Arabic-script Darija text)
            input_text = gr.Textbox(
                label="Input",
                placeholder="Enter text here...",
                rtl=True,
            )
            # Button row
            with gr.Row():
                clear_btn = gr.Button("Clear")
                submit_btn = gr.Button("Submit", variant="primary")
            # Examples section with caching; fn/outputs are required for
            # cache_examples=True so the results can be precomputed.
            gr.Examples(
                examples=["العاصمة د <mask> هي الرباط","المغرب <mask> زوين","انا سميتي مريم، و كنسكن ف<mask> العاصمة دفلسطين"],
                inputs=input_text,
                fn=predict,
                outputs=output_labels,
                cache_examples=True,
            )
        with gr.Column():
            # Render the pre-declared output component here
            output_labels.render()
    # Button actions
    submit_btn.click(
        predict,
        inputs=input_text,
        outputs=output_labels,
        show_progress="full",  # Gradio 4 takes a string; "full" == old True
    )
    clear_btn.click(
        lambda: "",
        outputs=input_text,
    )
# Launch the app with a request queue. ZeroGPU Spaces require Gradio 4.x,
# where `concurrency_count` was removed in favor of `default_concurrency_limit`.
demo.queue(default_concurrency_limit=3)  # allow 3 concurrent predictions
demo.launch()