initial commit

- .gitattributes +1 -0
- __pycache__/model.cpython-311.pyc +0 -0
- app.py +68 -0
- examples/1683426.jpg +0 -0
- examples/2246332.jpg +0 -0
- examples/3375083.jpg +0 -0
- finetuned_effnetb2_20percent.pth +3 -0
- model.py +18 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+finetuned_effnetb2_20percent.pth filter=lfs diff=lfs merge=lfs -text
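The new rule stores the checkpoint as a Git LFS pointer rather than a raw blob, so a fresh clone without `git lfs pull` ends up with a small text file instead of the weights. A minimal sketch (not part of this commit) of a guard that fails fast in that situation, using the pointer's spec line, which is visible in the pointer file further below:

# Sketch only: LFS pointer files are short text files beginning with
# "version https://git-lfs.github.com/spec/v1", whereas a real .pth
# checkpoint is a binary archive, so peeking at the first bytes is enough.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(64).startswith(b"version https://git-lfs")

if is_lfs_pointer("finetuned_effnetb2_20percent.pth"):
    raise RuntimeError("Got a Git LFS pointer, not the weights - run `git lfs pull`.")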
__pycache__/model.cpython-311.pyc
ADDED
Binary file (1.45 kB).
app.py
ADDED
@@ -0,0 +1,68 @@
"""The main parts are:
1. Imports and class names setup
2. Model and transforms preparation
3. Write a predict function for Gradio to use
4. Write the Gradio app and the launch command
"""
import os
from typing import Tuple, Dict, List

import PIL
import torch
import torchvision
import gradio as gr
from torch import nn
from timeit import default_timer as timer

from model import create_effnetb2_model

class_names = ['pizza', 'steak', 'sushi']  # hardcoded as a list
model, transforms = create_effnetb2_model(num_classes=len(class_names))

# Load saved weights into the model, mapping them onto the CPU
model.load_state_dict(torch.load(f="finetuned_effnetb2_20percent.pth",
                                 map_location=torch.device('cpu')))

# Write the function Gradio calls to run inference
def predict(img: PIL.Image.Image,
            model: nn.Module = model,
            transforms=transforms,
            class_names: List[str] = class_names) -> Tuple[Dict, float]:
    """Predicts the class of an image for the Gradio app.

    Args:
        img (PIL.Image.Image): Input image.
        model (nn.Module, optional): Trained model. Defaults to the fine-tuned EffNetB2 loaded above.
        transforms (optional): Preprocessing transforms. Defaults to the EffNetB2 transforms.
        class_names (List[str], optional): List of class names. Defaults to class_names.

    Returns:
        Tuple[Dict, float]: Class-probability dict and prediction time, for further processing on Gradio.
    """
    start_time = timer()
    img = transforms(img).unsqueeze(0)  # add a batch dimension
    model.eval()
    with torch.inference_mode():
        pred_probs = torch.softmax(model(img), dim=1)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
    end_time = timer()
    pred_time = round(end_time - start_time, 4)
    return pred_labels_and_probs, pred_time

# Create example_list
example_list = [["examples/" + example] for example in os.listdir("examples")]

# Create the Gradio app
title = 'FoodVision Mini 🍕🥩🍣'
description = "Using a fine-tuned [EfficientNetB2](https://arxiv.org/abs/1905.11946) for image classification"
article = "Created by [Titus Lim](https://github.com/tituslhy)"

demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article)

# Launch the demo
demo.launch(debug=False,  # set True to print errors locally
            share=True)   # generate a publicly shareable URL
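Before pushing to the Space, the predict function can be exercised locally on one of the bundled example images. This is a sketch, not part of the commit, and it assumes the demo.launch(...) call is temporarily commented out so that importing app does not start the server:

import PIL.Image

from app import predict  # assumes demo.launch(...) is commented out first

img = PIL.Image.open("examples/1683426.jpg")
labels_and_probs, seconds = predict(img)
print(labels_and_probs)  # {'pizza': ..., 'steak': ..., 'sushi': ...}
print(f"inference took {seconds}s on CPU")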
examples/1683426.jpg
ADDED
examples/2246332.jpg
ADDED
examples/3375083.jpg
ADDED
finetuned_effnetb2_20percent.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b208111513233556e3b64bd5bac9ad2cdbd370613b66a973a50ef890ab87f3aa
size 31277471
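The oid and size fields of this pointer describe the real checkpoint blob that Git LFS fetches. A short integrity check, sketched under the assumption that the weights have already been downloaded into the working directory:

import hashlib
import os

CKPT = "finetuned_effnetb2_20percent.pth"
EXPECTED_OID = "b208111513233556e3b64bd5bac9ad2cdbd370613b66a973a50ef890ab87f3aa"
EXPECTED_SIZE = 31277471  # bytes, from the pointer's `size` field

# Cheap check first: the pointer itself is only a few dozen bytes
assert os.path.getsize(CKPT) == EXPECTED_SIZE, "size mismatch - is this still an LFS pointer?"

# Hash in 1 MB chunks so the ~31 MB file never sits in memory at once
digest = hashlib.sha256()
with open(CKPT, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch - corrupted download?"
print("checkpoint verified")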
model.py
ADDED
@@ -0,0 +1,18 @@
import torch
import torchvision
from torch import nn

def create_effnetb2_model(num_classes: int = 3,
                          seed: int = 42):
    """Creates a frozen EffNetB2 feature extractor with a new classifier head."""
    # Get pretrained weights and the matching preprocessing transforms
    effnetb2_weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    effnetb2_transforms = effnetb2_weights.transforms()
    effnetb2 = torchvision.models.efficientnet_b2(weights=effnetb2_weights)

    # Freeze the base layers so only the new classifier head is trainable
    for param in effnetb2.parameters():
        param.requires_grad = False

    # Recreate the classifier head with a reproducible initialisation
    torch.manual_seed(seed)
    effnetb2.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes)
    )
    return effnetb2, effnetb2_transforms
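To sanity-check the factory function, build the model and push a dummy batch through it. A minimal sketch, assuming torchvision can fetch the pretrained EffNetB2 weights:

import torch

from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3, seed=42)
print(transforms)  # preprocessing pipeline bundled with the pretrained weights

# EffNetB2's default torchvision transforms produce 288x288 crops
dummy_batch = torch.randn(1, 3, 288, 288)
model.eval()
with torch.inference_mode():
    logits = model(dummy_batch)
print(logits.shape)  # torch.Size([1, 3]) - one logit per class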