Spaces:
Build error
Afonso B. Sousa committed
Initial version.
Files changed:
- app.py +64 -0
- lenet.py +50 -0
- models/lenet5.pkl +0 -0
- models/lenet5.pt +0 -0
- models/lenet5.ptc +0 -0
app.py
ADDED
@@ -0,0 +1,64 @@
+# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
+
+# %% auto 0
+__all__ = ['MODEL_PATH', 'model', 'image', 'label', 'processed_image', 'intf', 'predict']
+
+# %% app.ipynb 2
+import torch
+import numpy as np
+import gradio as gr
+from PIL import Image
+from pathlib import Path
+import sys
+np.set_printoptions(threshold=sys.maxsize)
+
+# %% app.ipynb 4
+from lenet import LeNet5
+# Allowlist the custom class
+MODEL_PATH = Path("models/lenet5.pt")
+model = torch.load(MODEL_PATH, weights_only=False)
+model = model.to('cpu')  # Move model to CPU
+model.eval()
+
+def predict(img):
+    # Create a new image with a white background
+    background = Image.new("L", (28, 28), 255)
+
+    # Resize the input image
+    img_pil = img["composite"].resize((28, 28))
+
+    # Paste the resized image onto the white background
+    background.paste(img_pil, (0, 0), img_pil)
+
+    # Convert to numpy
+    img_array = np.array(background)
+
+    # Invert colors (MNIST has white digits on black)
+    img_array = 255 - img_array
+
+    # Create a displayable version of the inverted image (what the model actually sees)
+    inverted_debug = img_array.astype(np.uint8)
+
+    img_tensor = torch.tensor(img_array, dtype=torch.float32)
+    img_tensor = img_tensor.unsqueeze(0).unsqueeze(0)  # Add channel and batch dimensions
+
+    # Debug: Print the shape and values of the input tensor
+    print(f"Input tensor shape: {img_tensor.shape}")
+    print(f"Input tensor values: {img_tensor}")
+
+    with torch.no_grad():
+        output = model(img_tensor)
+        probabilities = torch.nn.functional.softmax(output, dim=1)[0]
+
+    print(f"Output shape: {output.shape}")
+    print(f"Probabilities shape: {probabilities.shape}")
+    print(f"Probabilities: {probabilities}")
+
+    # Create dictionary of label: probability for Gradio Label output
+    return {str(i): float(prob) for i, prob in enumerate(probabilities)}, inverted_debug
+
+image = gr.Sketchpad(type="pil", sources=(), canvas_size=(280, 280), brush=gr.Brush(colors=["#000000"], color_mode="fixed", default_size=20), layers=False, transforms=[])
+label = gr.Label()
+processed_image = gr.Image(label="What the Model Sees (28x28)")
+intf = gr.Interface(title="Title", fn=predict, inputs=image, outputs=[label, processed_image], clear_btn=None)
+intf.launch(inline=False, debug=True)
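Note on the loading step above: despite the "Allowlist the custom class" comment, `weights_only=False` bypasses PyTorch's safe-unpickling allowlist rather than using it (PyTorch >= 2.4 offers `torch.serialization.add_safe_globals`, though a fully pickled `nn.Module` typically needs many more classes allowlisted than just `LeNet5`). A more common safe pattern is to ship a state_dict checkpoint instead. The sketch below is not part of this commit; it assumes a hypothetical models/lenet5_state.pt file and 10 output classes.

import torch
from pathlib import Path
from lenet import LeNet5

STATE_PATH = Path("models/lenet5_state.pt")  # hypothetical state_dict checkpoint

# One-time export from the full pickled model shipped in this commit:
# torch.save(torch.load("models/lenet5.pt", weights_only=False).state_dict(), STATE_PATH)

model = LeNet5(num_classes=10)                     # rebuild the architecture in code (10 classes assumed)
state = torch.load(STATE_PATH, weights_only=True)  # loads tensors only, no arbitrary pickle code
model.load_state_dict(state)
model.eval()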
lenet.py
ADDED
@@ -0,0 +1,50 @@
+# AUTOGENERATED! DO NOT EDIT! File to edit: simple_network.ipynb.
+
+# %% auto 0
+__all__ = ['LeNet5']
+
+# %% simple_network.ipynb 1
+#%matplotlib inline
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+
+import torchvision
+import torchvision.transforms as transforms
+
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+
+# %% simple_network.ipynb 4
+class LeNet5(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.l1 = nn.Sequential(
+            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2), # 28*28-->32*32-->28*28
+            nn.BatchNorm2d(6),
+            nn.ReLU(),
+            nn.MaxPool2d(kernel_size = 2, stride = 2))
+
+        self.l2 = nn.Sequential(
+            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0), # 10*10
+            nn.BatchNorm2d(16),
+            nn.ReLU(),
+            nn.MaxPool2d(kernel_size = 2, stride = 2))
+
+        self.classifier = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(in_features=16*5*5, out_features=120),
+            nn.ReLU(),
+            nn.Linear(in_features=120, out_features=84),
+            nn.ReLU(),
+            nn.Linear(in_features=84, out_features=num_classes),
+        )
+
+    def forward(self, x):
+        out = self.l1(x)
+        out = self.l2(out)
+        out = self.classifier(out)
+        return out
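For reference, the classifier's `16*5*5` input size follows from a 28x28 input: l1 keeps the spatial size at 28x28 (5x5 conv with padding=2) and pools to 14x14, then l2's unpadded 5x5 conv gives 10x10 and pools to 5x5 over 16 channels. A quick shape check, not part of the commit (10 classes assumed, matching the digit labels in app.py):

import torch
from lenet import LeNet5

model = LeNet5(num_classes=10)  # 10 digit classes assumed
model.eval()                    # use BatchNorm running statistics

x = torch.zeros(1, 1, 28, 28)   # one grayscale 28x28 image
with torch.no_grad():
    logits = model(x)

print(logits.shape)  # torch.Size([1, 10]); app.py applies softmax to these logits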
models/lenet5.pkl
ADDED
Binary file (256 kB)

models/lenet5.pt
ADDED
Binary file (258 kB)

models/lenet5.ptc
ADDED
Binary file (270 kB)
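The commit does not document how the three binary artifacts were produced; judging by the extensions, lenet5.pt is the full-module `torch.save` checkpoint that app.py loads, lenet5.pkl a plain pickle of the model, and lenet5.ptc a TorchScript export. A hedged sketch of how such files are typically written (assumed, not taken from this repo; training omitted):

import pickle
import torch
from lenet import LeNet5

model = LeNet5(num_classes=10)  # 10 classes assumed; a trained model would be used in practice
model.eval()

# Full-module checkpoint, matching what app.py loads with torch.load(...)
torch.save(model, "models/lenet5.pt")

# Plain pickle dump (one plausible origin of lenet5.pkl)
with open("models/lenet5.pkl", "wb") as f:
    pickle.dump(model, f)

# TorchScript export (one plausible origin of lenet5.ptc)
torch.jit.script(model).save("models/lenet5.ptc")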