Shilpaj committed
Commit 5177d9a · 1 Parent(s): 35e684f

Fix: Gradcam issue

Files changed (1)
  1. app.py +13 -6
app.py CHANGED
@@ -53,7 +53,7 @@ def inference_wrapper(image, alpha, top_k, target_layer):
     """
     try:
         if image is None:
-            return {"error": "No image provided"}, None
+            return {"No image provided": 1.0}, None
 
         results = inference(
             image,
@@ -65,13 +65,20 @@ def inference_wrapper(image, alpha, top_k, target_layer):
         )
 
         if results is None:
-            return {"error": "Processing failed"}, None
+            return {"Processing failed": 1.0}, None
 
         return results
 
     except Exception as e:
-        print(f"Error in inference: {str(e)}")
-        return {"error": str(e)}, None
+        error_msg = str(e)
+        print(f"Error in inference: {error_msg}")
+
+        # Handle GPU quota error specifically
+        if "GPU quota" in error_msg:
+            return {"GPU quota exceeded - Please try again later": 1.0}, None
+
+        # Handle other errors
+        return {"Error: " + error_msg: 1.0}, None
 
 
 def main():
@@ -189,9 +196,9 @@ def main():
     )
 
     # Configure queue
-    demo.queue()
+    demo.queue(concurrency_count=1)  # Limit concurrent processing
 
-    # Launch with minimal parameters
+    # Launch with compatible parameters
     demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
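
Note on the fix: the new return values map a message string to a confidence score, which suggests the wrapper's first output feeds a Gradio Label component. A Label expects a {label: confidence} mapping (or a plain string), so a {"error": message} dict with a string value is something it cannot render as confidences. The sketch below illustrates the pattern under that assumption; the run_model helper, the component wiring, and the class names are placeholders, not code from this repository's app.py.

import gradio as gr


def run_model(image):
    # Placeholder for the real classifier + Grad-CAM step (the actual app
    # calls an inference() helper; this stub just returns fixed scores).
    return {"cat": 0.7, "dog": 0.3}, image


def classify(image):
    """Wrapper whose outputs feed a gr.Label and a gr.Image component."""
    try:
        if image is None:
            # gr.Label renders this as a single entry with confidence 1.0
            return {"No image provided": 1.0}, None
        scores, overlay = run_model(image)
        return scores, overlay
    except Exception as e:
        msg = str(e)
        if "GPU quota" in msg:
            return {"GPU quota exceeded - Please try again later": 1.0}, None
        return {"Error: " + msg: 1.0}, None


demo = gr.Interface(
    fn=classify,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(num_top_classes=5), gr.Image(label="Grad-CAM overlay")],
)
demo.queue()  # the commit passes concurrency_count=1 here, a Gradio 3.x queue argument
demo.launch(server_name="0.0.0.0", server_port=7860)

The queue change in the commit appears to serve the same goal: limiting the queue to one concurrent worker keeps a single request on the GPU at a time, which presumably makes the "GPU quota" error handled above less likely on shared hardware.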