import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
# Load the model and tokenizer once
model_name = "ibm-granite/granite-3.3-2b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
llm = pipeline("text-generation", model=model, tokenizer=tokenizer)
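# Hedged alternative (a sketch, not used below): transformers' text-generation
# pipeline accepts return_full_text=False, which returns only the completion,
# so the prompt would not need to be stripped with .replace(prompt, "").
def generate_completion(prompt, max_new_tokens=200):
    return llm(prompt, max_new_tokens=max_new_tokens, return_full_text=False)[0]["generated_text"].strip()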
# Module 1: Policy Summarization
def policy_summarizer(text):
    prompt = f"Summarize the following city policy in simple terms:\n{text}\nSummary:"
    result = llm(prompt, max_new_tokens=200)[0]["generated_text"]
    # The pipeline echoes the prompt, so strip it to keep only the summary.
    return result.replace(prompt, "").strip()
# Module 2: Citizen Feedback
def citizen_feedback(issue):
    return f"Thank you! Your issue '{issue}' has been logged and categorized appropriately."
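# Hedged sketch of actual categorization (hypothetical label set, unused by the
# UI below; reuses the llm pipeline defined above). The handler above only
# acknowledges receipt and does not categorize anything itself.
def categorize_issue(issue):
    prompt = f"Classify this civic issue into one of [roads, water, waste, power, other]: {issue}\nCategory:"
    return llm(prompt, max_new_tokens=10)[0]["generated_text"].replace(prompt, "").strip()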
# Module 3: KPI Forecasting
def kpi_forecasting(csv_file):
    df = pd.read_csv(csv_file.name)
    # Fit a simple linear trend: first column is the year, second the KPI value.
    X = df.iloc[:, 0].values.reshape(-1, 1)
    y = df.iloc[:, 1].values
    reg = LinearRegression().fit(X, y)  # avoid shadowing the global LLM `model`
    next_year = [[X[-1][0] + 1]]
    prediction = reg.predict(next_year)[0]
    return f"📈 Predicted KPI for {next_year[0][0]}: {round(prediction, 2)}"
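# Worked example (hypothetical data): years [2020, 2021, 2022] with values
# [10.0, 12.0, 14.0] fit slope 2.0 and intercept -4030.0, so the forecast
# for 2023 is 2.0 * 2023 - 4030.0 = 16.0.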
# Module 4: Eco Tips Generator
def eco_tips(keyword):
    prompt = f"Give 3 actionable eco-friendly tips related to: {keyword}"
    result = llm(prompt, max_new_tokens=100)[0]["generated_text"]
    return result.replace(prompt, "").strip()
# Module 5: Anomaly Detection
def detect_anomaly(csv_file):
    df = pd.read_csv(csv_file.name)
    if "value" not in df.columns:
        return "⚠️ CSV must contain a 'value' column."
    # Flag rows more than two standard deviations from the mean (2-sigma rule).
    mean = df["value"].mean()
    std = df["value"].std()
    anomalies = df[np.abs(df["value"] - mean) > 2 * std]
    if anomalies.empty:
        return "✅ No significant anomalies detected."
    return "⚠️ Anomalies found:\n" + anomalies.to_string(index=False)
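# Worked example (hypothetical data): nine readings of 10 and one of 100 give
# mean 19 and sample std ≈ 28.5, so only the 100 deviates by more than
# 2 * std ≈ 56.9 (|100 - 19| = 81) and is flagged.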
# Module 6: Chat Assistant
def chat_assistant(question):
    prompt = f"Answer this smart city sustainability question:\n\nQ: {question}\nA:"
    # do_sample=True is required for temperature to take effect in transformers.
    result = llm(prompt, max_new_tokens=200, do_sample=True, temperature=0.7)[0]["generated_text"]
    return result.replace(prompt, "").strip()
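# Hedged sketch (assumes the tokenizer ships a chat template, as instruct
# models typically do; unused by the UI below): formatting through the
# template matches the model's fine-tuning format better than a raw Q:/A: prompt.
def format_chat_prompt(question):
    messages = [{"role": "user", "content": question}]
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)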
# Gradio Interface
app = gr.TabbedInterface(
    interface_list=[
        gr.Interface(fn=policy_summarizer, inputs=gr.Textbox(lines=7, label="Paste Policy Text"), outputs="textbox", title="📝 Policy Summarization"),
        gr.Interface(fn=citizen_feedback, inputs=gr.Textbox(lines=2, label="Describe the Issue"), outputs="text", title="📣 Citizen Feedback"),
        gr.Interface(fn=kpi_forecasting, inputs=gr.File(label="Upload CSV with Year and Value columns"), outputs="text", title="📊 KPI Forecasting"),
        gr.Interface(fn=eco_tips, inputs=gr.Textbox(lines=2, label="Keyword (e.g. Plastic, Solar)"), outputs="text", title="🌱 Eco Tips Generator"),
        gr.Interface(fn=detect_anomaly, inputs=gr.File(label="Upload CSV with 'value' column"), outputs="text", title="🚨 Anomaly Detection"),
        gr.Interface(fn=chat_assistant, inputs=gr.Textbox(lines=2, label="Ask your question"), outputs="text", title="💬 Smart City Chat"),
    ],
    tab_names=["Summarize", "Feedback", "Forecast", "Eco Tips", "Anomalies", "Chat"],
    title="🌆 Sustainable Smart City Assistant",
)
app.launch()