nouamanetazi HF staff committed on
Commit
849d516
·
verified ·
1 Parent(s): 2f6d870

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -16
app.py CHANGED
@@ -3,7 +3,6 @@ import torch
3
  from transformers import pipeline
4
  import os
5
  import spaces
6
- import functools
7
 
8
  #load_dotenv()
9
  key=os.environ["HF_KEY"]
@@ -25,7 +24,6 @@ print("[INFO] load model ...")
25
  pipe=load_model()
26
  print("[INFO] model loaded")
27
 
28
- # Remove the @gr.cache decorator since it's not available
29
  @spaces.GPU
30
  def predict(text):
31
  outputs = pipe(text)
@@ -49,20 +47,19 @@ with gr.Blocks() as demo:
49
  clear_btn = gr.Button("Clear")
50
  submit_btn = gr.Button("Submit", variant="primary")
51
 
52
- # Examples section with caching
53
- gr.Examples(
54
- examples=["العاصمة د <mask> هي الرباط","المغرب <mask> زوين","انا سميتي مريم، و كنسكن ف<mask> العاصمة دفلسطين"],
55
- inputs=input_text,
56
- cache_examples=True,
57
- preprocess=True # Precompute examples
58
- )
59
-
60
- with gr.Column():
61
  # Output probabilities
62
  output_labels = gr.Label(
63
  label="Prediction Results",
64
- show_label=False,
65
- num_top_classes=5 # Limit to top 5 predictions
 
 
 
 
 
 
 
 
66
  )
67
 
68
  # Button actions
@@ -70,7 +67,7 @@ with gr.Blocks() as demo:
70
  predict,
71
  inputs=input_text,
72
  outputs=output_labels,
73
- show_progress=True # Show a progress indicator
74
  )
75
 
76
  clear_btn.click(
@@ -79,5 +76,5 @@ with gr.Blocks() as demo:
79
  )
80
 
81
  # Launch the app with queue
82
- demo.queue(concurrency_count=3) # Allow 3 concurrent predictions
83
- demo.launch() # Remove show_api parameter if it causes issues
 
3
  from transformers import pipeline
4
  import os
5
  import spaces
 
6
 
7
  #load_dotenv()
8
  key=os.environ["HF_KEY"]
 
24
  pipe=load_model()
25
  print("[INFO] model loaded")
26
 
 
27
  @spaces.GPU
28
  def predict(text):
29
  outputs = pipe(text)
 
47
  clear_btn = gr.Button("Clear")
48
  submit_btn = gr.Button("Submit", variant="primary")
49
 
 
 
 
 
 
 
 
 
 
50
  # Output probabilities
51
  output_labels = gr.Label(
52
  label="Prediction Results",
53
+ show_label=False
54
+ )
55
+
56
+ # Examples section without caching for now
57
+ gr.Examples(
58
+ examples=["العاصمة د <mask> هي الرباط","المغرب <mask> زوين","انا سميتي مريم، و كنسكن ف<mask> العاصمة دفلسطين"],
59
+ inputs=input_text,
60
+ fn=predict, # Add the function to use for examples
61
+ outputs=output_labels, # Add the output component
62
+ cache_examples=True
63
  )
64
 
65
  # Button actions
 
67
  predict,
68
  inputs=input_text,
69
  outputs=output_labels,
70
+ show_progress=True
71
  )
72
 
73
  clear_btn.click(
 
76
  )
77
 
78
  # Launch the app with queue
79
+ demo.queue(concurrency_count=3)
80
+ demo.launch()