Atharwaaah committed on
Commit 96d6b6d · verified · 1 Parent(s): 38fa4b8

Update app.py

Files changed (1):
  1. app.py +135 -21
app.py CHANGED
@@ -1,4 +1,3 @@
-# Remove the pip install line - use requirements.txt instead
 import torch
 from transformers import (
     AutoTokenizer,
@@ -15,6 +14,7 @@ from typing import List, Tuple
 
 # Create cache directory
 os.makedirs("model_cache", exist_ok=True)
+os.makedirs("examples", exist_ok=True)  # Create examples directory
 
 # Configuration for 4-bit quantization
 quant_config = BitsAndBytesConfig(
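Note: the config's arguments fall outside this hunk's context, so the values used by app.py are not visible here. For reference, a typical 4-bit NF4 setup with transformers looks like the following sketch (the specific values are assumptions, not taken from this commit):

import torch
from transformers import BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # store weights in 4-bit
    bnb_4bit_quant_type="nf4",             # NormalFloat4 quantization
    bnb_4bit_compute_dtype=torch.float16,  # dtype used for compute
    bnb_4bit_use_double_quant=True,        # quantize the quantization constants
)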
@@ -54,12 +54,6 @@ class RiverPollutionAnalyzer:
         except Exception as e:
             raise RuntimeError(f"Model loading failed: {str(e)}")
 
-        # [Rest of your class implementation remains unchanged]
-        # ... (keep all your existing methods) ...
-
-        # [Rest of your Gradio implementation remains unchanged]
-        # ... (keep all your existing UI code) ...
-
         self.pollutants = [
             "plastic waste", "chemical foam", "industrial discharge",
             "sewage water", "oil spill", "organic debris",
@@ -110,22 +104,97 @@ Severity: [number]"""
         except Exception as e:
             return f"⚠️ Analysis failed: {str(e)}"
 
-    # [Keep all your existing parsing/formatting methods unchanged]
     def _parse_response(self, analysis: str) -> Tuple[List[str], int]:
-        """Same parsing logic as before"""
-        # ... (unchanged from your original code) ...
+        """Parse the model response into a pollutants list and a severity score"""
+        pollutants = []
+        severity = 0
+
+        # Extract pollutants
+        pollutants_match = re.search(r"Pollutants:\s*\[(.*?)\]", analysis)
+        if pollutants_match:
+            pollutants_str = pollutants_match.group(1)
+            pollutants = [p.strip() for p in pollutants_str.split(",") if p.strip()]
+
+        # Extract severity
+        severity_match = re.search(r"Severity:\s*(\d+)", analysis)
+        if severity_match:
+            severity = int(severity_match.group(1))
+
+        # If parsing failed, fall back to calculating severity
+        if not severity or severity < 1 or severity > 10:
+            severity = self._calculate_severity(pollutants)
+
+        return pollutants, severity
 
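For reference, a response in the format the analysis prompt requests parses as follows — a minimal sketch using the same regex patterns; it assumes `re` is imported elsewhere in app.py, since the import is outside this diff's context:

import re

sample = "Pollutants: [plastic waste, oil spill]\nSeverity: 7"
pollutants = [p.strip()
              for p in re.search(r"Pollutants:\s*\[(.*?)\]", sample).group(1).split(",")
              if p.strip()]
severity = int(re.search(r"Severity:\s*(\d+)", sample).group(1))
# pollutants == ["plastic waste", "oil spill"], severity == 7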
     def _calculate_severity(self, pollutants: List[str]) -> int:
-        """Same severity calculation"""
-        # ... (unchanged from your original code) ...
+        """Calculate severity based on pollutants"""
+        if not pollutants:
+            return 1
+
+        severity_map = {
+            "plastic waste": 4,
+            "chemical foam": 7,
+            "industrial discharge": 8,
+            "sewage water": 6,
+            "oil spill": 9,
+            "organic debris": 3,
+            "construction waste": 5,
+            "medical waste": 8,
+            "floating trash": 4,
+            "algal bloom": 6,
+            "toxic sludge": 9,
+            "agricultural runoff": 5,
+        }
+
+        base_score = sum(severity_map.get(p, 3) for p in pollutants)
+        avg_score = base_score / len(pollutants)
+        return min(10, max(1, round(avg_score)))
 
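A worked example of the scoring above, with hypothetical inputs: for ["oil spill", "organic debris"] the base score is 9 + 3 = 12, the average is 6.0, and the clamped result is 6. One subtlety worth knowing: Python 3's round() uses banker's rounding, so an average of exactly 6.5 rounds to 6, not 7:

scores = [9, 3]                      # "oil spill", "organic debris"
avg = sum(scores) / len(scores)      # 6.0
print(min(10, max(1, round(avg))))   # 6
print(round(6.5), round(7.5))        # 6 8 — banker's rounding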
     def _format_analysis(self, pollutants: List[str], severity: int) -> str:
-        """Same formatting"""
-        # ... (unchanged from your original code) ...
+        """Format the analysis results into a markdown report"""
+        if not pollutants:
+            pollutants = ["No visible pollution detected"]
+
+        pollutants_list = "\n".join(f"- {p}" for p in pollutants)
+        severity_desc = self.severity_descriptions.get(severity, "Unknown severity level")
+
+        return f"""
+## Pollution Analysis Report
+
+### Identified Pollutants:
+{pollutants_list}
+
+### Severity Assessment:
+**Level {severity}/10** - {severity_desc}
+
+### Recommended Actions:
+{self._get_recommendations(severity)}
+"""
+
+    def _get_recommendations(self, severity: int) -> str:
+        """Get recommendations based on severity level"""
+        if severity <= 3:
+            return "Monitor the situation. Consider community clean-up efforts."
+        elif severity <= 5:
+            return "Local authorities should investigate. Basic remediation needed."
+        elif severity <= 7:
+            return "Immediate containment required. Environmental assessment needed."
+        elif severity <= 9:
+            return "Emergency response required. Notify environmental agencies."
+        else:
+            return "Disaster response needed. Evacuation may be necessary."
 
     def analyze_chat(self, message: str) -> str:
-        """Same chat handler"""
-        # ... (unchanged from your original code) ...
+        """Handle chat questions about pollution"""
+        prompt = f"""You are an environmental expert. Answer this question about river pollution: {message}
+
+Provide a concise, factual response in under 100 words."""
+
+        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
+        outputs = self.model.generate(**inputs, max_new_tokens=150)
+        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        return response
 
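One caveat on analyze_chat: for causal LMs, generate() returns the prompt tokens followed by the new tokens, so the decoded response above will begin with the prompt text itself. A common fix — a sketch, not part of this commit — is to decode only the newly generated tokens before returning:

new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
response = self.tokenizer.decode(new_tokens, skip_special_tokens=True)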
 # Initialize with error handling
 try:
@@ -135,9 +204,38 @@ except Exception as e:
     analyzer = None
     model_status = f"❌ Model loading failed: {str(e)}"
 
-# Gradio Interface (unchanged layout from your original)
+# Gradio Interface
 css = """
-/* [Keep your existing CSS] */
+.header {
+    text-align: center;
+    max-width: 800px;
+    margin: auto;
+}
+.header img {
+    max-width: 100%;
+}
+.side-by-side {
+    display: flex;
+    flex-wrap: wrap;
+    gap: 20px;
+}
+.left-panel, .right-panel {
+    flex: 1;
+    min-width: 300px;
+}
+.analysis-box {
+    border: 1px solid #e0e0e0;
+    border-radius: 8px;
+    padding: 15px;
+    margin-top: 15px;
+    background: #f9f9f9;
+}
+.chat-container {
+    border: 1px solid #e0e0e0;
+    border-radius: 8px;
+    padding: 15px;
+    background: #f9f9f9;
+}
 """
 
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
@@ -177,7 +275,23 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         outputs=analysis_output
     )
 
-    # [Keep all other UI event handlers unchanged]
+    def respond(message, chat_history):
+        if not analyzer:
+            return chat_history + [(message, "Models not loaded. Please try again later.")]
+        response = analyzer.analyze_chat(message)
+        return chat_history + [(message, response)]
+
+    chat_btn.click(
+        respond,
+        [chat_input, chatbot],
+        [chatbot],
+    )
+    chat_input.submit(
+        respond,
+        [chat_input, chatbot],
+        [chatbot],
+    )
+    clear_btn.click(lambda: None, None, chatbot, queue=False)
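As wired here, the textbox keeps its text after sending, since respond only updates the chatbot. A common variant — a sketch, not part of this commit — also returns an empty string for chat_input so the box clears on submit:

    def respond(message, chat_history):
        if not analyzer:
            return "", chat_history + [(message, "Models not loaded. Please try again later.")]
        return "", chat_history + [(message, analyzer.analyze_chat(message))]

    chat_input.submit(respond, [chat_input, chatbot], [chat_input, chatbot])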
 
     # Update examples to use local files
     gr.Examples(
@@ -192,5 +306,5 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         label="Try example images:"
     )
 
-# Launch with queue for stability
-demo.queue(max_size=3).launch()
+# Launch with queue for stability and allowed paths
+demo.queue(max_size=3).launch(allowed_paths=["examples"])
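Here, allowed_paths permits Gradio to serve files from the examples directory created at the top of the file; without it, Gradio blocks file paths outside its default allowed locations. The example images referenced by gr.Examples are not part of this commit, so the expected layout is a guess:

examples/
├── river_1.jpg   (hypothetical filenames — only the directory
└── river_2.jpg    is created by this commit, not the images)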