Added bmi predictor app
- app.py +80 -0
- lr.p +0 -0
- requirements.txt +13 -0
app.py
ADDED
@@ -0,0 +1,80 @@
import streamlit as st

import os
import numpy as np
import pandas as pd
from glob import glob
import pickle

from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
from scipy.stats import pearsonr

import matplotlib.pyplot as plt
import seaborn as sns

import torch
import torchvision.transforms as transforms
from PIL import Image

from facenet_pytorch import MTCNN, InceptionResnetV1

import warnings

warnings.filterwarnings("ignore")

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# If required, create a face detection pipeline using MTCNN:
mtcnn = MTCNN(
    image_size=160, margin=40, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
    device=device
)

mtcnn2 = MTCNN(
    image_size=160, margin=40, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False,
    device=device
)

# Create an inception resnet (in eval mode):
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)

# Define the transformation to preprocess the images
preprocess = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

def extract_features(img):
    img = img.convert('RGB')
    face = mtcnn(img)
    if face is None:
        face = preprocess(img)

    img = torch.stack([face]).to(device)

    with torch.no_grad():
        features = resnet(img)

    return features[0].cpu().numpy()


with open("/app/models/lr.p", "rb") as f:
    lr = pickle.load(f)

img_file_buffer = st.camera_input("Take a picture")

if img_file_buffer is not None:
    # To read image file buffer as a PIL Image:
    img = Image.open(img_file_buffer)

    detected_image = Image.fromarray(mtcnn2(img).numpy().transpose(1, 2, 0).astype(np.uint8))
    st.image(detected_image, caption="Detected Face")

    embeddings = extract_features(img)
    bmi = round(lr.predict([embeddings])[0], 2)
    st.write(f"Your BMI is {bmi}")
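app.py only loads the pickled estimator; the training code is not part of this commit. A plausible reconstruction, given that lr.predict is called on a single 512-dim FaceNet embedding, is a scikit-learn regressor fit on embedding/BMI pairs and pickled. The sketch below is hypothetical: the estimator choice (a linear model, suggested by the name "lr" and the small file size, though the unused RandomForestRegressor import hints at experiments with tree ensembles), the placeholder data, and the output path are all assumptions.

# Hypothetical training sketch (not part of this commit); data and estimator are assumed.
import pickle
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 512))        # placeholder 512-dim embeddings (extract_features output)
y = rng.uniform(18.0, 35.0, size=200)  # placeholder BMI labels

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

lr = LinearRegression()
lr.fit(X_train, y_train)
print("MAE:", mean_absolute_error(y_test, lr.predict(X_test)))

with open("lr.p", "wb") as f:
    pickle.dump(lr, f)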
lr.p
ADDED
Binary file (4.51 kB).
requirements.txt
ADDED
@@ -0,0 +1,13 @@
facenet-pytorch==2.5.3
imgaug==0.4.0
matplotlib==3.7.1
numpy==1.23.5
opencv-python==4.7.0.72
pandas==1.5.3
scikit-learn==1.2.2
scipy==1.10.1
seaborn==0.12.2
streamlit==1.22.0
torch==2.0.1
torchvision==0.15.2
tqdm==4.65.0
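Usage note (not part of the commit): locally, the dependencies install with `pip install -r requirements.txt` and the app starts with `streamlit run app.py`. app.py reads the model from the hard-coded path /app/models/lr.p, while this commit adds lr.p at the repository root, so the Space's build presumably copies or mounts the file into /app/models/.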