# creating app.py
########## imports ############
import torch
import torch.nn as nn
from torchvision import models, transforms
import gradio as gr
from model import create_model
from PIL import Image
import os
from pathlib import Path
###############################
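# `create_model` lives in the local model.py, which is not shown on this page.
# As a rough, assumed sketch (not the author's actual code): it presumably
# returns a torchvision Swin-B with its classifier head resized to 101 classes,
# together with the matching preprocessing transform for those weights, e.g.:
#
#   from torchvision.models import swin_b, Swin_B_Weights
#
#   def create_model(num_classes = 101):
#       weights = Swin_B_Weights.DEFAULT
#       transform = weights.transforms()
#       model = swin_b(weights = weights)
#       model.head = nn.Linear(model.head.in_features, num_classes)
#       return model, transform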
########## model & class setup ##########
# Build the model and its preprocessing transform once at startup,
# instead of rebuilding them on every prediction call.
swinb, transform = create_model()

# Read the 101 Food101 class names (one per line).
with open('classes.txt', 'r') as f:
    class_names = [foodname.strip() for foodname in f.readlines()]

# Load the fine-tuned weights, mapping them onto the CPU.
swinb.load_state_dict(torch.load(f = 'models/swin40%newer.pth', map_location = torch.device('cpu')))
swinb.eval()
###############################
def predict(img):
    # Preprocess the incoming PIL image and add a batch dimension.
    img = transform(img).unsqueeze(0)
    with torch.inference_mode():
        # argmax returns a 1-element tensor; .item() converts it to a plain int
        # so it can index the class_names list.
        pred_label = class_names[swinb(img).softmax(dim = 1).argmax(dim = 1).item()]
    return pred_label
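# Local sanity check (the file name here is illustrative, not from the repo):
#   print(predict(Image.open('examples/pizza.jpg')))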
###############################
title = 'FoodVision Project'
description = 'FoodVision is an image classification model based on a Swin Transformer that has been fine-tuned on the 101 food classes of the Food101 dataset.'
###############################
# Gather the example images shipped with the app for the Gradio demo.
example_list = [['examples/' + example] for example in os.listdir('examples')]
for examples in example_list:
    print(examples)
demo = gr.Interface(
    fn = predict,
    inputs = gr.Image(type = 'pil'),
    outputs = [gr.Textbox()],
    examples = example_list,
    title = title,
    description = description
)
demo.launch(debug = True)
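# Note: debug = True keeps full tracebacks visible in the logs while testing;
# on a hosted Hugging Face Space, a plain demo.launch() also works.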