import streamlit as st | |
import firebase_admin | |
from firebase_admin import credentials, db | |
from PIL import Image | |
import numpy as np | |
from geopy.geocoders import Nominatim | |
from tensorflow.keras.applications import MobileNetV2 | |
from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
import base64 | |
from io import BytesIO | |
# Initialize the Firebase Admin SDK exactly once per process.
# Streamlit re-runs this script on every interaction, so we guard on the
# SDK's app registry to avoid "app already exists" errors.
if not firebase_admin._apps:
    service_account = credentials.Certificate("firebase_credentials.json")
    firebase_admin.initialize_app(
        service_account,
        {'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/'},
    )

# Load the ImageNet-pretrained MobileNetV2 classifier once at module import.
mobilenet_model = MobileNetV2(weights="imagenet")
# Function to classify the uploaded image using MobileNetV2
def classify_image_with_mobilenet(image):
    """Classify a PIL image with MobileNetV2 and return its top-5 labels.

    Args:
        image: PIL.Image.Image as produced by ``Image.open``; any mode
            (RGBA, palette, grayscale) is accepted.

    Returns:
        dict mapping ImageNet class label -> confidence (float),
        or an empty dict if classification fails.
    """
    try:
        # Force 3-channel RGB before resizing: PNG uploads are frequently
        # RGBA or palette mode, which would otherwise produce a
        # (224, 224, 4) / (224, 224) array and break the model's
        # expected (224, 224, 3) input.
        img = image.convert("RGB").resize((224, 224))
        img_array = np.expand_dims(np.array(img), axis=0)
        img_array = preprocess_input(img_array)
        predictions = mobilenet_model.predict(img_array)
        labels = decode_predictions(predictions, top=5)[0]
        # Each label tuple is (wnid, human_name, score).
        return {label[1]: float(label[2]) for label in labels}
    except Exception as e:
        # Surface the failure in the UI but keep the app running.
        st.error(f"Error during image classification: {e}")
        return {}
# Function to convert image to Base64
def convert_image_to_base64(image):
    """Encode a PIL image as the Base64 string of its PNG bytes."""
    png_buffer = BytesIO()
    image.save(png_buffer, format="PNG")
    return base64.b64encode(png_buffer.getvalue()).decode()
# Function to get detailed location info from latitude & longitude
def get_location_details(lat, lon):
    """Reverse-geocode coordinates into a structured address via Nominatim.

    Args:
        lat: latitude as a float.
        lon: longitude as a float.

    Returns:
        dict that ALWAYS contains the keys road/city/state/country/
        full_address (defaulting to "Unknown"), so callers can index
        them without risking a KeyError when geocoding fails or the
        coordinates have no match.
    """
    # Pre-populated defaults: the original returned {} on failure, which
    # made callers' `details['full_address']` lookups raise KeyError.
    details = {
        "road": "Unknown",
        "city": "Unknown",
        "state": "Unknown",
        "country": "Unknown",
        "full_address": "Unknown",
    }
    try:
        geolocator = Nominatim(user_agent="binsight")
        location = geolocator.reverse((lat, lon), exactly_one=True)
        if location:
            address = location.raw.get('address', {})
            details.update({
                "road": address.get("road", "Unknown"),
                # Nominatim uses "town" for smaller settlements.
                "city": address.get("city", address.get("town", "Unknown")),
                "state": address.get("state", "Unknown"),
                "country": address.get("country", "Unknown"),
                "full_address": location.address,
            })
    except Exception as e:
        st.error(f"Error retrieving location details: {e}")
    return details
# JavaScript to get live location and send it to Streamlit
#
# NOTE(review): this snippet POSTs the coordinates to the internal
# '/_stcore/' endpoint. That endpoint is not a supported channel back into
# Python, so `st.session_state.latlon` is presumably never populated by this
# request — which would leave the app stuck on the "Fetching live
# location..." warning below. Verify, and consider a bridge component
# (e.g. streamlit-js-eval) if location never arrives.
get_location_js = """
<script>
function sendLocation() {
    navigator.geolocation.getCurrentPosition(
        (position) => {
            const latitude = position.coords.latitude;
            const longitude = position.coords.longitude;
            const locationData = latitude + "," + longitude;
            fetch('/_stcore/', {
                method: 'POST',
                headers: {'Content-Type': 'application/json'},
                body: JSON.stringify({latlon: locationData})
            }).then(response => response.json())
            .then(data => console.log("Location sent:", data));
        },
        (error) => {
            console.log("Error getting location:", error);
        }
    );
}
sendLocation();
</script>
"""
# Run JavaScript in Streamlit
# height=0 keeps the helper iframe invisible; the script still executes.
st.components.v1.html(get_location_js, height=0)
# Capture location from Streamlit session state
if "latlon" not in st.session_state:
    st.session_state.latlon = None

# Streamlit App
st.title("BinSight: Upload Dustbin Image")

# Fetch live location
if st.session_state.latlon:
    try:
        latitude, longitude = map(float, st.session_state.latlon.split(","))
        location_details = get_location_details(latitude, longitude)
        # Use .get() with defaults: get_location_details may return an
        # empty dict on failure, and direct indexing would raise KeyError.
        st.success(f"Detected Location: {location_details.get('full_address', 'Unknown')}")
        st.write(f"**State:** {location_details.get('state', 'Unknown')}")
        st.write(f"**City:** {location_details.get('city', 'Unknown')}")
        st.write(f"**Road:** {location_details.get('road', 'Unknown')}")
        st.write(f"**Country:** {location_details.get('country', 'Unknown')}")
    except Exception as e:
        st.error(f"Error processing location: {e}")
else:
    st.warning("Fetching live location... Please allow location access.")
# File uploader for dustbin images
uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"])
submit_button = st.button("Analyze and Upload")

if submit_button and uploaded_file and st.session_state.latlon:
    # Recompute the coordinates locally instead of relying on names bound
    # in the display section above: if that section raised before its
    # assignments completed, `latitude`/`longitude`/`location_details`
    # would be unbound here and this block would crash with NameError.
    try:
        latitude, longitude = map(float, st.session_state.latlon.split(","))
    except ValueError:
        st.error("Invalid location data. Cannot upload.")
        st.stop()
    location_details = get_location_details(latitude, longitude)

    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)

    # Convert image to Base64 so it can be stored inline in the Realtime DB.
    image_base64 = convert_image_to_base64(image)

    # Classify Image
    classification_results = classify_image_with_mobilenet(image)

    if classification_results:
        db_ref = db.reference("dustbins")
        dustbin_data = {
            "latitude": latitude,
            "longitude": longitude,
            "location_details": location_details,
            "classification": classification_results,
            "status": "Pending",
            "image": image_base64  # Store image as Base64 string
        }
        db_ref.push(dustbin_data)
        st.success("Dustbin data uploaded successfully!")
    else:
        st.error("Missing classification details. Cannot upload.")
# best without image | |
# import streamlit as st | |
# import requests | |
# import firebase_admin | |
# from firebase_admin import credentials, db, auth | |
# from PIL import Image | |
# import numpy as np | |
# from geopy.geocoders import Nominatim | |
# from tensorflow.keras.applications import MobileNetV2 | |
# from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
# import json | |
# # Initialize Firebase | |
# if not firebase_admin._apps: | |
# cred = credentials.Certificate("firebase_credentials.json") | |
# firebase_admin.initialize_app(cred, { | |
# 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
# }) | |
# # Load MobileNetV2 pre-trained model | |
# mobilenet_model = MobileNetV2(weights="imagenet") | |
# # Function to classify the uploaded image using MobileNetV2 | |
# def classify_image_with_mobilenet(image): | |
# try: | |
# img = image.resize((224, 224)) | |
# img_array = np.array(img) | |
# img_array = np.expand_dims(img_array, axis=0) | |
# img_array = preprocess_input(img_array) | |
# predictions = mobilenet_model.predict(img_array) | |
# labels = decode_predictions(predictions, top=5)[0] | |
# return {label[1]: float(label[2]) for label in labels} | |
# except Exception as e: | |
# st.error(f"Error during image classification: {e}") | |
# return {} | |
# # Function to get user's location using geolocation API | |
# def get_user_location(): | |
# st.write("Fetching location, please allow location access in your browser.") | |
# geolocator = Nominatim(user_agent="binsight") | |
# try: | |
# ip_info = requests.get("https://ipinfo.io/json").json() | |
# loc = ip_info.get("loc", "").split(",") | |
# latitude, longitude = loc[0], loc[1] if len(loc) == 2 else (None, None) | |
# if latitude and longitude: | |
# address = geolocator.reverse(f"{latitude}, {longitude}").address | |
# return latitude, longitude, address | |
# except Exception as e: | |
# st.error(f"Error retrieving location: {e}") | |
# return None, None, None | |
# # User Login | |
# st.sidebar.header("User Login") | |
# user_email = st.sidebar.text_input("Enter your email") | |
# login_button = st.sidebar.button("Login") | |
# if login_button: | |
# if user_email: | |
# st.session_state["user_email"] = user_email | |
# st.sidebar.success(f"Logged in as {user_email}") | |
# if "user_email" not in st.session_state: | |
# st.warning("Please log in first.") | |
# st.stop() | |
# # Get user location and display details | |
# latitude, longitude, address = get_user_location() | |
# if latitude and longitude: | |
# st.success(f"Location detected: {address}") | |
# else: | |
# st.warning("Unable to fetch location, please ensure location access is enabled.") | |
# st.stop() | |
# # Streamlit App | |
# st.title("BinSight: Upload Dustbin Image") | |
# uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"]) | |
# submit_button = st.button("Analyze and Upload") | |
# if submit_button and uploaded_file: | |
# image = Image.open(uploaded_file) | |
# st.image(image, caption="Uploaded Image", use_container_width=True) | |
# classification_results = classify_image_with_mobilenet(image) | |
# if classification_results: | |
# db_ref = db.reference("dustbins") | |
# dustbin_data = { | |
# "user_email": st.session_state["user_email"], | |
# "latitude": latitude, | |
# "longitude": longitude, | |
# "address": address, | |
# "classification": classification_results, | |
# "allocated_truck": None, | |
# "status": "Pending" | |
# } | |
# db_ref.push(dustbin_data) | |
# st.success("Dustbin data uploaded successfully!") | |
# st.write(f"**Location:** {address}") | |
# st.write(f"**Latitude:** {latitude}, **Longitude:** {longitude}") | |
# else: | |
# st.error("Missing classification details. Cannot upload.") | |
# Previous version: integrates Firebase, but the IP-based lookup below does not give the user's correct location.
# import streamlit as st | |
# import requests | |
# import firebase_admin | |
# from firebase_admin import credentials, db, auth | |
# from PIL import Image | |
# import numpy as np | |
# from geopy.geocoders import Nominatim | |
# from tensorflow.keras.applications import MobileNetV2 | |
# from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
# # Initialize Firebase | |
# if not firebase_admin._apps: | |
# cred = credentials.Certificate("firebase_credentials.json") | |
# firebase_admin.initialize_app(cred, { | |
# 'databaseURL': 'https://binsight-beda0-default-rtdb.asia-southeast1.firebasedatabase.app/' | |
# }) | |
# # Load MobileNetV2 pre-trained model | |
# mobilenet_model = MobileNetV2(weights="imagenet") | |
# # Function to classify the uploaded image using MobileNetV2 | |
# def classify_image_with_mobilenet(image): | |
# try: | |
# img = image.resize((224, 224)) | |
# img_array = np.array(img) | |
# img_array = np.expand_dims(img_array, axis=0) | |
# img_array = preprocess_input(img_array) | |
# predictions = mobilenet_model.predict(img_array) | |
# labels = decode_predictions(predictions, top=5)[0] | |
# return {label[1]: float(label[2]) for label in labels} | |
# except Exception as e: | |
# st.error(f"Error during image classification: {e}") | |
# return {} | |
# # Function to get user's location | |
# def get_user_location(): | |
# try: | |
# ip_info = requests.get("https://ipinfo.io/json").json() | |
# location = ip_info.get("loc", "").split(",") | |
# latitude = location[0] if len(location) > 0 else None | |
# longitude = location[1] if len(location) > 1 else None | |
# if latitude and longitude: | |
# geolocator = Nominatim(user_agent="binsight") | |
# address = geolocator.reverse(f"{latitude}, {longitude}").address | |
# return latitude, longitude, address | |
# return None, None, None | |
# except Exception as e: | |
# st.error(f"Unable to get location: {e}") | |
# return None, None, None | |
# # User Login | |
# st.sidebar.header("User Login") | |
# user_email = st.sidebar.text_input("Enter your email") | |
# login_button = st.sidebar.button("Login") | |
# if login_button: | |
# if user_email: | |
# st.session_state["user_email"] = user_email | |
# st.sidebar.success(f"Logged in as {user_email}") | |
# if "user_email" not in st.session_state: | |
# st.warning("Please log in first.") | |
# st.stop() | |
# # Streamlit App | |
# st.title("BinSight: Upload Dustbin Image") | |
# uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"]) | |
# submit_button = st.button("Analyze and Upload") | |
# if submit_button and uploaded_file: | |
# image = Image.open(uploaded_file) | |
# st.image(image, caption="Uploaded Image", use_container_width=True) | |
# classification_results = classify_image_with_mobilenet(image) | |
# latitude, longitude, address = get_user_location() | |
# if latitude and longitude and classification_results: | |
# db_ref = db.reference("dustbins") | |
# dustbin_data = { | |
# "user_email": st.session_state["user_email"], | |
# "latitude": latitude, | |
# "longitude": longitude, | |
# "address": address, | |
# "classification": classification_results, | |
# "allocated_truck": None, | |
# "status": "Pending" | |
# } | |
# db_ref.push(dustbin_data) | |
# st.success("Dustbin data uploaded successfully!") | |
# else: | |
# st.error("Missing classification or location details. Cannot upload.") | |
# Below is an older version: no Firebase integration, but it adds Gemini-based analysis.
# import streamlit as st | |
# import os | |
# from PIL import Image | |
# import numpy as np | |
# from io import BytesIO | |
# from dotenv import load_dotenv | |
# from geopy.geocoders import Nominatim | |
# from tensorflow.keras.applications import MobileNetV2 | |
# from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
# import requests | |
# import google.generativeai as genai | |
# # Load environment variables | |
# load_dotenv() | |
# # Configure Generative AI | |
# genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))  # SECURITY: never hard-code API keys — a real key was committed here and must be revoked/rotated.
# # Load MobileNetV2 pre-trained model | |
# mobilenet_model = MobileNetV2(weights="imagenet") | |
# # Function to classify the uploaded image using MobileNetV2 | |
# def classify_image_with_mobilenet(image): | |
# try: | |
# img = image.resize((224, 224)) | |
# img_array = np.array(img) | |
# img_array = np.expand_dims(img_array, axis=0) | |
# img_array = preprocess_input(img_array) | |
# predictions = mobilenet_model.predict(img_array) | |
# labels = decode_predictions(predictions, top=5)[0] | |
# return {label[1]: float(label[2]) for label in labels} | |
# except Exception as e: | |
# st.error(f"Error during image classification: {e}") | |
# return {} | |
# # Function to get user's location | |
# def get_user_location(): | |
# try: | |
# ip_info = requests.get("https://ipinfo.io/json").json() | |
# location = ip_info.get("loc", "").split(",") | |
# latitude = location[0] if len(location) > 0 else None | |
# longitude = location[1] if len(location) > 1 else None | |
# if latitude and longitude: | |
# geolocator = Nominatim(user_agent="binsight") | |
# address = geolocator.reverse(f"{latitude}, {longitude}").address | |
# return latitude, longitude, address | |
# return None, None, None | |
# except Exception as e: | |
# st.error(f"Unable to get location: {e}") | |
# return None, None, None | |
# # Function to get nearest municipal details with contact info | |
# def get_nearest_municipal_details(latitude, longitude): | |
# try: | |
# if latitude and longitude: | |
# # Simulating municipal service retrieval | |
# municipal_services = [ | |
# {"latitude": "12.9716", "longitude": "77.5946", "office": "Bangalore Municipal Office", "phone": "+91-80-12345678"}, | |
# {"latitude": "28.7041", "longitude": "77.1025", "office": "Delhi Municipal Office", "phone": "+91-11-98765432"}, | |
# {"latitude": "19.0760", "longitude": "72.8777", "office": "Mumbai Municipal Office", "phone": "+91-22-22334455"}, | |
# ] | |
# # Find the nearest municipal service (mock logic: matching first two decimal points) | |
# for service in municipal_services: | |
# if str(latitude).startswith(service["latitude"][:5]) and str(longitude).startswith(service["longitude"][:5]): | |
# return f""" | |
# **Office**: {service['office']} | |
# **Phone**: {service['phone']} | |
# """ | |
# return "No nearby municipal office found. Please check manually." | |
# else: | |
# return "Location not available. Unable to fetch municipal details." | |
# except Exception as e: | |
# st.error(f"Unable to fetch municipal details: {e}") | |
# return None | |
# # Function to interact with Generative AI | |
# def get_genai_response(classification_results, location): | |
# try: | |
# classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()]) | |
# location_summary = f""" | |
# Latitude: {location[0] if location[0] else 'N/A'} | |
# Longitude: {location[1] if location[1] else 'N/A'} | |
# Address: {location[2] if location[2] else 'N/A'} | |
# """ | |
# prompt = f""" | |
# ### You are an environmental expert. Analyze the following: | |
# 1. **Image Classification**: | |
# - {classification_summary} | |
# 2. **Location**: | |
# - {location_summary} | |
# ### Output Required: | |
# 1. Detailed insights about the waste detected in the image. | |
# 2. Specific health risks associated with the detected waste type. | |
# 3. Precautions to mitigate these health risks. | |
# 4. Recommendations for proper disposal. | |
# """ | |
# model = genai.GenerativeModel('gemini-pro') | |
# response = model.generate_content(prompt) | |
# return response | |
# except Exception as e: | |
# st.error(f"Error using Generative AI: {e}") | |
# return None | |
# # Function to display Generative AI response | |
# def display_genai_response(response): | |
# st.subheader("Detailed Analysis and Recommendations") | |
# if response and response.candidates: | |
# response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else "" | |
# st.write(response_content) | |
# else: | |
# st.write("No response received from Generative AI or quota exceeded.") | |
# # Streamlit App | |
# st.title("BinSight: AI-Powered Dustbin and Waste Analysis System") | |
# st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.") | |
# uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.") | |
# submit_button = st.button("Analyze Dustbin") | |
# if submit_button: | |
# if uploaded_file is not None: | |
# image = Image.open(uploaded_file) | |
# st.image(image, caption="Uploaded Image", use_container_width =True) | |
# # Classify the image using MobileNetV2 | |
# st.subheader("Image Classification") | |
# classification_results = classify_image_with_mobilenet(image) | |
# for label, score in classification_results.items(): | |
# st.write(f"- **{label}**: {score:.2f}") | |
# # Get user location | |
# location = get_user_location() | |
# latitude, longitude, address = location | |
# st.subheader("User Location") | |
# st.write(f"Latitude: {latitude if latitude else 'N/A'}") | |
# st.write(f"Longitude: {longitude if longitude else 'N/A'}") | |
# st.write(f"Address: {address if address else 'N/A'}") | |
# # Get nearest municipal details with contact info | |
# st.subheader("Nearest Municipal Details") | |
# municipal_details = get_nearest_municipal_details(latitude, longitude) | |
# st.write(municipal_details) | |
# # Generate detailed analysis with Generative AI | |
# if classification_results: | |
# response = get_genai_response(classification_results, location) | |
# display_genai_response(response) | |
# else: | |
# st.write("Please upload an image for analysis.") | |
# # import streamlit as st | |
# # import os | |
# # from PIL import Image | |
# # import numpy as np | |
# # from io import BytesIO | |
# # from dotenv import load_dotenv | |
# # from geopy.geocoders import Nominatim | |
# # from tensorflow.keras.applications import MobileNetV2 | |
# # from tensorflow.keras.applications.mobilenet_v2 import decode_predictions, preprocess_input | |
# # import requests | |
# # import google.generativeai as genai | |
# # # Load environment variables | |
# # load_dotenv() | |
# # # Configure Generative AI | |
# # genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))  # SECURITY: hard-coded key removed — revoke/rotate the leaked credential.
# # # Load MobileNetV2 pre-trained model | |
# # mobilenet_model = MobileNetV2(weights="imagenet") | |
# # # Function to classify the uploaded image using MobileNetV2 | |
# # def classify_image_with_mobilenet(image): | |
# # try: | |
# # # Resize the image to the input size of MobileNetV2 | |
# # img = image.resize((224, 224)) | |
# # img_array = np.array(img) | |
# # img_array = np.expand_dims(img_array, axis=0) | |
# # img_array = preprocess_input(img_array) | |
# # # Predict using the MobileNetV2 model | |
# # predictions = mobilenet_model.predict(img_array) | |
# # labels = decode_predictions(predictions, top=5)[0] | |
# # return {label[1]: float(label[2]) for label in labels} | |
# # except Exception as e: | |
# # st.error(f"Error during image classification: {e}") | |
# # return {} | |
# # # Function to get user's location | |
# # def get_user_location(): | |
# # try: | |
# # # Fetch location using the IPInfo API | |
# # ip_info = requests.get("https://ipinfo.io/json").json() | |
# # location = ip_info.get("loc", "").split(",") | |
# # latitude = location[0] if len(location) > 0 else None | |
# # longitude = location[1] if len(location) > 1 else None | |
# # if latitude and longitude: | |
# # geolocator = Nominatim(user_agent="binsight") | |
# # address = geolocator.reverse(f"{latitude}, {longitude}").address | |
# # return latitude, longitude, address | |
# # return None, None, None | |
# # except Exception as e: | |
# # st.error(f"Unable to get location: {e}") | |
# # return None, None, None | |
# # # Function to get nearest municipal details | |
# # def get_nearest_municipal_details(latitude, longitude): | |
# # try: | |
# # if latitude and longitude: | |
# # # Simulating municipal service retrieval | |
# # return f"The nearest municipal office is at ({latitude}, {longitude}). Please contact your local authority for waste management services." | |
# # else: | |
# # return "Location not available. Unable to fetch municipal details." | |
# # except Exception as e: | |
# # st.error(f"Unable to fetch municipal details: {e}") | |
# # return None | |
# # # Function to interact with Generative AI | |
# # def get_genai_response(classification_results, location): | |
# # try: | |
# # # Construct prompt for Generative AI | |
# # classification_summary = "\n".join([f"{label}: {score:.2f}" for label, score in classification_results.items()]) | |
# # location_summary = f""" | |
# # Latitude: {location[0] if location[0] else 'N/A'} | |
# # Longitude: {location[1] if location[1] else 'N/A'} | |
# # Address: {location[2] if location[2] else 'N/A'} | |
# # """ | |
# # prompt = f""" | |
# # ### You are an environmental expert. Analyze the following: | |
# # 1. **Image Classification**: | |
# # - {classification_summary} | |
# # 2. **Location**: | |
# # - {location_summary} | |
# # ### Output Required: | |
# # 1. Detailed insights about the waste detected in the image. | |
# # 2. Specific health risks associated with the detected waste type. | |
# # 3. Precautions to mitigate these health risks. | |
# # 4. Recommendations for proper disposal. | |
# # """ | |
# # model = genai.GenerativeModel('gemini-pro') | |
# # response = model.generate_content(prompt) | |
# # return response | |
# # except Exception as e: | |
# # st.error(f"Error using Generative AI: {e}") | |
# # return None | |
# # # Function to display Generative AI response | |
# # def display_genai_response(response): | |
# # st.subheader("Detailed Analysis and Recommendations") | |
# # if response and response.candidates: | |
# # response_content = response.candidates[0].content.parts[0].text if response.candidates[0].content.parts else "" | |
# # st.write(response_content) | |
# # else: | |
# # st.write("No response received from Generative AI or quota exceeded.") | |
# # # Streamlit App | |
# # st.title("BinSight: AI-Powered Dustbin and Waste Analysis System") | |
# # st.text("Upload a dustbin image and get AI-powered analysis of the waste and associated health recommendations.") | |
# # uploaded_file = st.file_uploader("Upload an image of the dustbin", type=["jpg", "jpeg", "png"], help="Upload a clear image of a dustbin for analysis.") | |
# # submit_button = st.button("Analyze Dustbin") | |
# # if submit_button: | |
# # if uploaded_file is not None: | |
# # image = Image.open(uploaded_file) | |
# # st.image(image, caption="Uploaded Image", use_column_width=True) | |
# # # Classify the image using MobileNetV2 | |
# # st.subheader("Image Classification") | |
# # classification_results = classify_image_with_mobilenet(image) | |
# # for label, score in classification_results.items(): | |
# # st.write(f"- **{label}**: {score:.2f}") | |
# # # Get user location | |
# # location = get_user_location() | |
# # latitude, longitude, address = location | |
# # st.subheader("User Location") | |
# # st.write(f"Latitude: {latitude if latitude else 'N/A'}") | |
# # st.write(f"Longitude: {longitude if longitude else 'N/A'}") | |
# # st.write(f"Address: {address if address else 'N/A'}") | |
# # # Get nearest municipal details | |
# # st.subheader("Nearest Municipal Details") | |
# # municipal_details = get_nearest_municipal_details(latitude, longitude) | |
# # st.write(municipal_details) | |
# # # Generate detailed analysis with Generative AI | |
# # if classification_results: | |
# # response = get_genai_response(classification_results, location) | |
# # display_genai_response(response) | |
# # else: | |
# # st.write("Please upload an image for analysis.") | |