nouamanetazi HF staff committed on
Commit
8113c0b
·
verified ·
1 Parent(s): 2f1fb99

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -11
app.py CHANGED
@@ -9,18 +9,23 @@ key=os.environ["HF_KEY"]
9
 
10
 
11
  def load_model():
12
- pipe=pipeline(task="fill-mask",model="BounharAbdelaziz/XLM-RoBERTa-Morocco",token=key,device=0)
 
 
 
 
 
 
 
 
13
  return pipe
14
 
15
  print("[INFO] load model ...")
16
  pipe=load_model()
17
  print("[INFO] model loaded")
18
 
19
- # def predict(text):
20
- # predictions=pipe(text)
21
- # return predictions[0]["sequence"],predictions
22
-
23
  @spaces.GPU
 
24
  def predict(text):
25
  outputs = pipe(text)
26
  scores= [x["score"] for x in outputs]
@@ -43,24 +48,28 @@ with gr.Blocks() as demo:
43
  clear_btn = gr.Button("Clear")
44
  submit_btn = gr.Button("Submit", variant="primary")
45
 
46
- # Examples section
47
  gr.Examples(
48
  examples=["العاصمة د <mask> هي الرباط","المغرب <mask> زوين","انا سميتي مريم، و كنسكن ف<mask> العاصمة دفلسطين"],
49
- inputs=input_text
 
 
50
  )
51
 
52
  with gr.Column():
53
  # Output probabilities
54
  output_labels = gr.Label(
55
  label="Prediction Results",
56
- show_label=False
 
57
  )
58
 
59
  # Button actions
60
  submit_btn.click(
61
  predict,
62
  inputs=input_text,
63
- outputs=output_labels
 
64
  )
65
 
66
  clear_btn.click(
@@ -68,5 +77,6 @@ with gr.Blocks() as demo:
68
  outputs=input_text
69
  )
70
 
71
- # Launch the app
72
- demo.launch()
 
 
9
 
10
 
11
def load_model():
    """Build the fill-mask pipeline for the Moroccan Darija XLM-RoBERTa model.

    Returns:
        A transformers fill-mask ``Pipeline`` ready for inference.

    Notes:
        The original code hard-coded ``device=0``, which raises at import time
        whenever no CUDA device is visible — on ZeroGPU Spaces the GPU is
        typically only attached inside ``@spaces.GPU``-decorated calls, and
        ``load_model()`` runs at module import. We therefore fall back to CPU
        when CUDA is unavailable; behavior is unchanged when a GPU is present.
    """
    print("[INFO] Loading model... This may take a minute on Spaces")
    use_gpu = torch.cuda.is_available()
    pipe = pipeline(
        task="fill-mask",
        model="atlasia/XLM-RoBERTa-Morocco",
        token=key,  # HF auth token read from the environment at file top
        device=0 if use_gpu else -1,  # -1 = CPU per transformers convention
        # Half precision halves GPU memory; keep full precision on CPU,
        # where fp16 kernels are slow or unsupported.
        torch_dtype=torch.float16 if use_gpu else torch.float32,
    )
    print("[INFO] Model loaded successfully!")
    return pipe
22
 
23
  print("[INFO] load model ...")
24
  pipe=load_model()
25
  print("[INFO] model loaded")
26
 
 
 
 
 
27
  @spaces.GPU
28
+ @gr.cache(persist=True) # Add persistent caching
29
  def predict(text):
30
  outputs = pipe(text)
31
  scores= [x["score"] for x in outputs]
 
48
  clear_btn = gr.Button("Clear")
49
  submit_btn = gr.Button("Submit", variant="primary")
50
 
51
+ # Examples section with caching
52
  gr.Examples(
53
  examples=["العاصمة د <mask> هي الرباط","المغرب <mask> زوين","انا سميتي مريم، و كنسكن ف<mask> العاصمة دفلسطين"],
54
+ inputs=input_text,
55
+ cache_examples=True,
56
+ preprocess=True # Precompute examples
57
  )
58
 
59
  with gr.Column():
60
  # Output probabilities
61
  output_labels = gr.Label(
62
  label="Prediction Results",
63
+ show_label=False,
64
+ num_top_classes=5 # Limit to top 5 predictions
65
  )
66
 
67
  # Button actions
68
  submit_btn.click(
69
  predict,
70
  inputs=input_text,
71
+ outputs=output_labels,
72
+ show_progress=True # Show a progress indicator
73
  )
74
 
75
  clear_btn.click(
 
77
  outputs=input_text
78
  )
79
 
80
+ # Launch the app with queue
81
+ demo.queue(concurrency_count=3) # Allow 3 concurrent predictions
82
+ demo.launch(show_api=False) # Disable API tab if not needed