Spaces:
Running
Running
File size: 5,284 Bytes
3760c0b 58bd1b2 baf7aa0 d50261b 3760c0b 06ad0a5 418cf06 5867cce cefb660 884a5e9 cefb660 418cf06 84b79d8 cefb660 84b79d8 cefb660 418cf06 cefb660 84b79d8 cefb660 418cf06 06ad0a5 84b79d8 06ad0a5 58bd1b2 3760c0b 58bd1b2 cefb660 3760c0b 58bd1b2 cefb660 3760c0b 06ad0a5 3760c0b 84b79d8 3760c0b 84b79d8 5d33cbf 84b79d8 72125ff 3760c0b 84b79d8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 |
import html
import io
import time
import urllib.request
from base64 import b64decode, b64encode

import cv2
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from google.colab.output import eval_js
from google.colab.patches import cv2_imshow
from IPython.display import clear_output, display, Javascript, Image
# NOTE: PIL must be imported AFTER IPython.display so PIL's Image wins the
# name collision — the app calls Image.open(...) / Image.LANCZOS (PIL API).
from PIL import Image

from utils import *
# --- Model / label bootstrap -------------------------------------------
# Both helpers come from the project-local `utils` module.
labels = gen_labels()
model = model_arc()  # presumably returns a trained model — TODO confirm weights are loaded

# --- Page header --------------------------------------------------------
_TITLE_HTML = '''
<div style="padding-bottom: 20px; padding-top: 20px; padding-left: 5px; padding-right: 5px">
<center><h1>EcoIdentify (Test)</h1></center>
</div>
'''
_PROMPT_HTML = '''
<div>
<center><h3>Please upload Waste Image to find its Category</h3></center>
</div>
'''

# Render the banner and the instruction line (raw HTML, hence unsafe_allow_html).
st.markdown(_TITLE_HTML, unsafe_allow_html=True)
st.markdown(_PROMPT_HTML, unsafe_allow_html=True)
# Webcam capture helper. NOTE(review): this relies on the Google Colab JS
# bridge (`display`/`eval_js`) and will not work in a plain Streamlit server.
def take_photo(filename='photo.jpg', quality=0.8):
    """Capture one webcam frame via Colab's JS bridge and save it as a JPEG.

    Shows a live <video> element with a "Capture" button in the output
    frame, waits for the click, then encodes the current frame.

    Args:
        filename: path the JPEG is written to.
        quality: JPEG quality passed to canvas.toDataURL (0..1).

    Returns:
        The filename the photo was written to.
    """
    capture_js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({ video: true });
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
    display(capture_js)
    # eval_js returns a data URL: "data:image/jpeg;base64,<payload>".
    data_url = eval_js(f'takePhoto({quality})')
    jpeg_bytes = b64decode(data_url.split(',')[1])
    with open(filename, 'wb') as fh:
        fh.write(jpeg_bytes)
    return filename
# --- Image acquisition --------------------------------------------------
opt = st.selectbox("How do you want to upload the image for classification?",
                   ('Please Select', 'Upload image via link', 'Upload image from device', 'Capture a picture'))

# `image` ends up as a 256x256 PIL image (the model's input size), or None.
image = None

if opt == 'Upload image from device':
    file = st.file_uploader('Select', type=['jpg', 'png', 'jpeg'])
    if file:
        try:
            # Re-import locally: the top-level `from IPython.display import
            # ... Image` shadows PIL's Image, which this code needs.
            from PIL import Image
            image = Image.open(io.BytesIO(file.read())).resize((256, 256), Image.LANCZOS)
        except Exception as e:
            st.error(f"Error reading the file: {e}")

elif opt == 'Upload image via link':
    img_url = st.text_input('Enter the Image Address')
    if st.button('Submit'):
        try:
            from PIL import Image  # see shadowing note above
            response = urllib.request.urlopen(img_url)
            image = Image.open(response).resize((256, 256), Image.LANCZOS)
        # BUG FIX: urlopen raises urllib.error.URLError/HTTPError and PIL
        # raises UnidentifiedImageError — none of these are ValueError, so
        # the old `except ValueError` let bad URLs crash the app.
        except Exception:
            st.error("Please Enter a valid Image Address!")

elif opt == 'Capture a picture':
    # BUG FIX: the captured photo was previously discarded (bare
    # `take_photo()` call), so the classification step never saw it.
    try:
        from PIL import Image  # see shadowing note above
        image = Image.open(take_photo()).resize((256, 256), Image.LANCZOS)
    except Exception as e:
        st.error(f"Could not capture a photo: {e}")
# --- Classification & recycling guidance --------------------------------
def message(category):
    """Return a recycling-guidance sentence for a predicted waste *category*.

    Args:
        category: one of the label strings produced by `gen_labels()`
                  (e.g. 'paper', 'plastic', ...).
    """
    # BUG FIX: the original `if img == 'paper' or 'cardboard' or ...` was
    # always truthy (non-empty string literals), so EVERY item — including
    # trash — was reported as recyclable. Use a membership test instead.
    if category in ('paper', 'cardboard', 'metal', 'glass'):
        return (
            " therefore your item is recyclable. Please refer to https://www.wm.com/us/en/drop-off-locations to find a drop-off location near you.")
    elif category == 'plastic':
        # (typo fix in user-facing text: "you item" -> "your item")
        return (
            ' therefore your item may have a chance of being recyclable. Since this model has yet to recognize types of plastics, please refer to https://www.bing.com/ck/a?!&&p=c1474e95017548dfJmltdHM9MTcwMzcyMTYwMCZpZ3VpZD0xNmNjOTFiOS1hMDgwLTY5MmItMzBmNi04MmE1YTE3ODY4NDImaW5zaWQ9NTIyMA&ptn=3&ver=2&hsh=3&fclid=16cc91b9-a080-692b-30f6-82a5a1786842&psq=what+type+of+plastic+can+be+recycled&u=a1aHR0cHM6Ly93d3cucGxhc3RpY3Nmb3JjaGFuZ2Uub3JnL2Jsb2cvd2hpY2gtcGxhc3RpYy1jYW4tYmUtcmVjeWNsZWQ&ntb=1 to check if this item can be recycled.')
    else:
        return ('Your item is not recyclable. Please discard it safely.')


try:
    if image is not None:
        st.image(image, width=256, caption='Uploaded Image')
        if st.button('Predict'):
            # preprocess() comes from utils; newaxis adds the batch dim.
            img = preprocess(image)
            prediction = model.predict(img[np.newaxis, ...])
            predicted_label = labels[np.argmax(prediction[0], axis=-1)]
            st.info('Hey! The uploaded image has been classified as " {} waste " '.format(predicted_label))
            st.info(message(predicted_label))
except Exception as e:
    # BUG FIX: errors were shown as an info box and silently swallowed;
    # surface them as errors so failures are visible to the user.
    st.error(f"Something went wrong during classification: {e}")