Spaces: Runtime error
add more image samples
- .ipynb_checkpoints/app-checkpoint.py +1 -1
- .ipynb_checkpoints/train-checkpoint.ipynb +42 -131
- app.py +1 -1
- gradio_queue.db +0 -0
- kitten.jpg +0 -0
- sample_images/Abyssinian_27.jpg +0 -0
- sample_images/Abyssinian_29.jpg +0 -0
- sample_images/Abyssinian_67.jpg +0 -0
- sample_images/Abyssinian_7.jpg +0 -0
- sample_images/Bengal_108.jpg +0 -0
- sample_images/Bengal_109.jpg +0 -0
- sample_images/Bengal_19.jpg +0 -0
- sample_images/Bengal_21.jpg +0 -0
- sample_images/Birman_103.jpg +0 -0
- sample_images/Birman_113.jpg +0 -0
- sample_images/Birman_120.jpg +0 -0
- sample_images/Bombay_19.jpg +0 -0
- sample_images/Bombay_25.jpg +0 -0
- sample_images/British_Shorthair_57.jpg +0 -0
- sample_images/British_Shorthair_61.jpg +0 -0
- sample_images/Egyptian_Mau_57.jpg +0 -0
- sample_images/Egyptian_Mau_63.jpg +0 -0
- sample_images/american_bulldog_24.jpg +0 -0
- sample_images/american_bulldog_83.jpg +0 -0
- sample_images/american_pit_bull_terrier_52.jpg +0 -0
- sample_images/american_pit_bull_terrier_76.jpg +0 -0
- sample_images/basset_hound_12.jpg +0 -0
- sample_images/basset_hound_17.jpg +0 -0
- sample_images/beagle_120.jpg +0 -0
- sample_images/beagle_125.jpg +0 -0
- sample_images/chihuahua_93.jpg +0 -0
- sample_images/chihuahua_94.jpg +0 -0
- sample_images/staffordshire_bull_terrier_129.jpg +0 -0
- sample_images/staffordshire_bull_terrier_173.jpg +0 -0
- sample_images/wheaten_terrier_137.jpg +0 -0
- sample_images/wheaten_terrier_138.jpg +0 -0
- sample_images/yorkshire_terrier_189.jpg +0 -0
- sample_images/yorkshire_terrier_196.jpg +0 -0
- siamese.jpg +0 -0
- train.ipynb +260 -127
.ipynb_checkpoints/app-checkpoint.py
CHANGED
@@ -13,7 +13,7 @@ def predict(img):
 title = "Pet Breed Classifier"
 description = "A pet breed classifier trained on the Oxford Pets dataset"
 interpretation='default'
-examples = ['siamese.jpg', 'kitten.jpg']
+examples = ["sample_images/"+file for file in files]
 article="<p style='text-align: center'><a href='https://dicksonneoh.com' target='_blank'>Blog post</a></p>"
 enable_queue=True
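Note that the new examples line assumes a variable named files is already defined earlier in app.py; the diff only shows lines 13-19, so that definition is not visible here (the notebook builds it with an os.walk loop). A minimal, self-contained sketch of the same idea, assuming the committed folder is sample_images/, builds the list straight from the directory so it can only contain files that actually exist:

    import os

    # Hypothetical replacement for the `files` dependency: list the committed
    # sample_images/ folder directly and keep only image files.
    SAMPLE_DIR = "sample_images"
    examples = [
        os.path.join(SAMPLE_DIR, name)
        for name in sorted(os.listdir(SAMPLE_DIR))
        if name.lower().endswith((".jpg", ".jpeg", ".png"))
    ]

Because the list is derived from the folder itself, it cannot reference images that were never committed.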
.ipynb_checkpoints/train-checkpoint.ipynb
CHANGED
@@ -2,114 +2,10 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": null,
    "id": "311970df-d109-452d-a843-c31048daf6e3",
    "metadata": {},
-   "outputs": [
-    [removed output: stderr UserWarning that torch.solve is deprecated in favor of torch.linalg.solve and will be removed in a future PyTorch release]
-    [removed output: fastai progress table - epoch 0: train_loss 1.302047, valid_loss 0.278566, accuracy 0.912720, time 00:27]
-    [removed output: fastai progress table -
-     epoch 0: train_loss 0.340128, valid_loss 0.220963, accuracy 0.933018, time 00:30
-     epoch 1: train_loss 0.291360, valid_loss 0.273055, accuracy 0.912043, time 00:29
-     epoch 2: train_loss 0.221144, valid_loss 0.231351, accuracy 0.933694, time 00:29
-     epoch 3: train_loss 0.142861, valid_loss 0.212190, accuracy 0.939784, time 00:29
-     epoch 4: train_loss 0.097161, valid_loss 0.201630, accuracy 0.943166, time 00:29]
-   ],
+   "outputs": [],
    "source": [
     "from fastai.vision.all import *\n",
     "path = untar_data(URLs.PETS)\n",
@@ -122,7 +18,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": null,
    "id": "ef4ffc95-6051-4354-af16-25477b279657",
    "metadata": {},
    "outputs": [],
@@ -132,7 +28,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": null,
    "id": "49289d4b-7e8c-4264-bb03-8a0d851caf1c",
    "metadata": {},
    "outputs": [],
@@ -146,44 +42,59 @@
   },
   {
    "cell_type": "code",
-   "execution_count": ...,
-   "id": "62fe5dc0-5fd1-4cc7-af8d-a325e3915173",
+   "execution_count": 2,
+   "id": "a6b53fe8-ded5-4048-afd7-e488dc884aec",
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      [removed output text: the old Gradio launch log, largely truncated in the original rendering]
+      "american_pit_bull_terrier_52.jpg\n",
+      "staffordshire_bull_terrier_129.jpg\n",
+      "wheaten_terrier_138.jpg\n",
+      "staffordshire_bull_terrier_172.jpg\n",
+      "yorkshire_terrier_189.jpg\n",
+      "Abyssinian_67.jpg\n",
+      "basset_hound_12.jpg\n",
+      "american_bulldog_24.jpg\n",
+      "beagle_120.jpg\n",
+      "staffordshire_bull_terrier_173.jpg\n",
+      "beagle_125.jpg\n",
+      "Bengal_21.jpg\n",
+      "basset_hound_17.jpg\n",
+      "staffordshire_bull_terrier_130.jpg\n",
+      "Abyssinian_7.jpg\n",
+      "american_bulldog_83.jpg\n",
+      "Birman_103.jpg\n",
+      "Bengal_19.jpg\n",
+      "american_pit_bull_terrier_76.jpg\n",
+      "yorkshire_terrier_196.jpg\n",
+      "wheaten_terrier_137.jpg\n"
      ]
     }
    ],
+   "source": [
+    "import os\n",
+    "for root, dirs, files in os.walk(r'sample_image/'):\n",
+    "    for filename in files:\n",
+    "        print(filename)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "62fe5dc0-5fd1-4cc7-af8d-a325e3915173",
+   "metadata": {},
+   "outputs": [],
    "source": [
     "import gradio as gr\n",
     "\n",
     "title = \"Pet Breed Classifier\"\n",
     "description = \"A pet breed classifier trained on the Oxford Pets dataset\"\n",
     "interpretation='default'\n",
-    "examples = ['siamese.jpg', 'kitten.jpg']\n",
+    "# examples = ['siamese.jpg', 'kitten.jpg']\n",
+    "examples = files\n",
     "article=\"<p style='text-align: center'><a href='https://dicksonneoh.com' target='_blank'>Blog post</a></p>\"\n",
     "enable_queue=True\n",
     "\n",
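The cell added above collects filenames with os.walk and prints them. Two details are worth noting: os.walk yields bare filenames relative to each root (which is why app.py later prefixes them with "sample_images/", while the checkpoint's examples = files does not), and the path literal in the cell is 'sample_image/' even though the folder committed in this change is sample_images/. A small sketch, assuming the committed folder name, that joins root and filename so no separate prefixing step is needed:

    import os

    # Sketch: walk the committed sample_images/ folder and keep full relative
    # paths such as 'sample_images/Bengal_19.jpg'.
    examples = []
    for root, dirs, filenames in os.walk("sample_images"):
        for filename in filenames:
            examples.append(os.path.join(root, filename))

    print(len(examples), "example images found")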
app.py
CHANGED
@@ -13,7 +13,7 @@ def predict(img):
 title = "Pet Breed Classifier"
 description = "A pet breed classifier trained on the Oxford Pets dataset"
 interpretation='default'
-examples = ['siamese.jpg', 'kitten.jpg']
+examples = ["sample_images/"+file for file in files]
 article="<p style='text-align: center'><a href='https://dicksonneoh.com' target='_blank'>Blog post</a></p>"
 enable_queue=True
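Only lines 13-19 of app.py appear in this hunk, so the surrounding Interface construction is not shown. As a hedged illustration of how module-level settings like these are typically wired into Gradio 2.x, with the predict function taken from the hunk header and everything else an assumption rather than the file's actual code:

    import gradio as gr

    # Hypothetical wiring, not taken from app.py: pass the settings defined
    # above into gr.Interface and launch the demo.
    interface = gr.Interface(
        fn=predict,          # def predict(img), per the hunk header
        inputs="image",      # assumed input component
        outputs="label",     # assumed output component
        title=title,
        description=description,
        article=article,
        examples=examples,
        interpretation=interpretation,
    )
    interface.launch()  # enable_queue is presumably forwarded here or to gr.Interface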
gradio_queue.db
CHANGED
Binary files a/gradio_queue.db and b/gradio_queue.db differ
kitten.jpg
DELETED
Binary file (422 kB)
sample_images/Abyssinian_27.jpg
ADDED
sample_images/Abyssinian_29.jpg
ADDED
sample_images/Abyssinian_67.jpg
ADDED
sample_images/Abyssinian_7.jpg
ADDED
sample_images/Bengal_108.jpg
ADDED
sample_images/Bengal_109.jpg
ADDED
sample_images/Bengal_19.jpg
ADDED
sample_images/Bengal_21.jpg
ADDED
sample_images/Birman_103.jpg
ADDED
sample_images/Birman_113.jpg
ADDED
sample_images/Birman_120.jpg
ADDED
sample_images/Bombay_19.jpg
ADDED
sample_images/Bombay_25.jpg
ADDED
sample_images/British_Shorthair_57.jpg
ADDED
sample_images/British_Shorthair_61.jpg
ADDED
sample_images/Egyptian_Mau_57.jpg
ADDED
sample_images/Egyptian_Mau_63.jpg
ADDED
sample_images/american_bulldog_24.jpg
ADDED
sample_images/american_bulldog_83.jpg
ADDED
sample_images/american_pit_bull_terrier_52.jpg
ADDED
sample_images/american_pit_bull_terrier_76.jpg
ADDED
sample_images/basset_hound_12.jpg
ADDED
sample_images/basset_hound_17.jpg
ADDED
sample_images/beagle_120.jpg
ADDED
sample_images/beagle_125.jpg
ADDED
sample_images/chihuahua_93.jpg
ADDED
sample_images/chihuahua_94.jpg
ADDED
sample_images/staffordshire_bull_terrier_129.jpg
ADDED
sample_images/staffordshire_bull_terrier_173.jpg
ADDED
sample_images/wheaten_terrier_137.jpg
ADDED
sample_images/wheaten_terrier_138.jpg
ADDED
sample_images/yorkshire_terrier_189.jpg
ADDED
sample_images/yorkshire_terrier_196.jpg
ADDED
siamese.jpg
DELETED
Binary file (440 kB)
train.ipynb
CHANGED
@@ -2,114 +2,10 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": null,
    "id": "311970df-d109-452d-a843-c31048daf6e3",
    "metadata": {},
-   "outputs": [
-    [removed output: stderr UserWarning that torch.solve is deprecated in favor of torch.linalg.solve and will be removed in a future PyTorch release]
-    [removed output: fastai progress table - epoch 0: train_loss 1.359868, valid_loss 0.263231, accuracy 0.918809, time 00:27]
-    [removed output: fastai progress table -
-     epoch 0: train_loss 0.338471, valid_loss 0.239413, accuracy 0.929635, time 00:30
-     epoch 1: train_loss 0.291378, valid_loss 0.262474, accuracy 0.927605, time 00:30
-     epoch 2: train_loss 0.219857, valid_loss 0.189212, accuracy 0.947226, time 00:30
-     epoch 3: train_loss 0.148120, valid_loss 0.188948, accuracy 0.952639, time 00:30
-     epoch 4: train_loss 0.100334, valid_loss 0.183129, accuracy 0.951962, time 00:29]
-   ],
+   "outputs": [],
    "source": [
     "from fastai.vision.all import *\n",
     "path = untar_data(URLs.PETS)\n",
@@ -122,17 +18,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": 4,
    "id": "ef4ffc95-6051-4354-af16-25477b279657",
    "metadata": {},
    "outputs": [],
    "source": [
+    "from fastai.vision.all import *\n",
     "learn = load_learner('export.pkl')"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": 5,
    "id": "49289d4b-7e8c-4264-bb03-8a0d851caf1c",
    "metadata": {},
    "outputs": [],
@@ -146,7 +43,48 @@
   },
   {
    "cell_type": "code",
-   "execution_count": ...,
+   "execution_count": 6,
+   "id": "a6b53fe8-ded5-4048-afd7-e488dc884aec",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      [the same 21 filenames shown in the .ipynb_checkpoints/train-checkpoint.ipynb diff above, american_pit_bull_terrier_52.jpg through wheaten_terrier_137.jpg]
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "for root, dirs, files in os.walk(r'sample_image/'):\n",
+    "    for filename in files:\n",
+    "        print(filename)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
    "id": "62fe5dc0-5fd1-4cc7-af8d-a325e3915173",
    "metadata": {},
    "outputs": [
@@ -154,27 +92,221 @@
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Running on local URL: http://127.0.0.1:...\n",
-      [remainder of the old launch log truncated in the original rendering]
+      "Running on local URL: http://127.0.0.1:7863/\n",
+      "Running on public URL: https://41046.gradio.app\n",
+      "\n",
+      "This share link will expire in 72 hours. To get longer links, send an email to: [email protected]\n"
      ]
     },
-    [removed outputs: the rest of the old launch outputs, truncated in the original rendering]
+    [added output: display_data - a 900x500 iframe embedding https://41046.gradio.app (<IPython.lib.display.IFrame at 0x7f4bf02fd190>)]
+    [added output: execute_result 14 - (<Flask 'gradio.networking'>, 'http://127.0.0.1:7863/', 'https://41046.gradio.app')]
+    [added output: stderr - [2021-11-19 20:09:32,457] ERROR in app: Exception on /file/sample_images/staffordshire_bull_terrier_172.jpg [GET]; Flask/gradio/werkzeug traceback ending in FileNotFoundError: [Errno 2] No such file or directory: '/home/dnth/Desktop/webdemos/webdemo-pets-classifier/sample_images/staffordshire_bull_terrier_172.jpg']
+    [added output: empty HTML display_data]
+    [added output: stderr - the same FileNotFoundError traceback at 20:09:37 for sample_images/staffordshire_bull_terrier_172.jpg, followed by another empty HTML display_data]
+    [added output: stderr - [2021-11-19 20:09:39,059] ERROR in app: Exception on /api/interpret/ [POST]; ModuleNotFoundError: No module named 'skimage', re-raised by gradio/inputs.py as ValueError: Error: running this interpretation for images requires scikit-image, please install it first.]
+    [added outputs: three empty HTML display_data cells]
+    [added output: stderr - [2021-11-19 20:09:57,928] ERROR in app: the same FileNotFoundError traceback for sample_images/staffordshire_bull_terrier_130.jpg, followed by an empty HTML display_data]
    ],
    "source": [
@@ -183,7 +315,8 @@
     "title = \"Pet Breed Classifier\"\n",
     "description = \"A pet breed classifier trained on the Oxford Pets dataset\"\n",
     "interpretation='default'\n",
-    "examples = ['siamese.jpg', 'kitten.jpg']\n",
+    "# examples = ['siamese.jpg', 'kitten.jpg']\n",
+    "examples = [\"sample_images/\"+file for file in files] \n",
     "article=\"<p style='text-align: center'><a href='https://dicksonneoh.com' target='_blank'>Blog post</a></p>\"\n",
     "enable_queue=True\n",
     "\n",
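The outputs appended to train.ipynb above record the two failures behind the Space's Runtime error status: Gradio tries to serve sample_images/staffordshire_bull_terrier_172.jpg and staffordshire_bull_terrier_130.jpg, which the os.walk cell found locally but which were not added in this commit, and interpretation='default' fails because scikit-image is not installed (the traceback shows gradio's default image interpretation segmenting the input with skimage's slic). A small pre-flight check along these lines, run before launching and assuming an examples list like the one in app.py, would surface both problems early:

    import importlib.util
    import os

    # Sketch: verify every example image was actually committed to the repo.
    missing = [path for path in examples if not os.path.isfile(path)]
    if missing:
        raise FileNotFoundError(f"Example images missing from the repo: {missing}")

    # interpretation='default' needs scikit-image at request time, so make sure
    # it is importable (e.g. add scikit-image to requirements.txt).
    if importlib.util.find_spec("skimage") is None:
        raise ImportError("interpretation='default' requires scikit-image; install it first.")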