import torch
from PIL import Image
from torchvision import models, transforms
import gradio as gr
import os
import torch.nn as nn
os.system("wget https://github.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/blob/main/artist_classes.txt")
#os.system("wget https://github.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/blob/main/model_weights_mobilenet_v2_valp1trainp2.pth")
#model = torch.hub.load('pytorch/vision:v0.9.0', 'mobilenet_v2', pretrained=False)
#checkpoint = 'https://github.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/blob/main/model_weights_mobilenet_v2_valp1trainp2.pth'
#model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False))
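# Build a torchvision MobileNetV2, swap its final classifier layer for a 6-class artist head,
# and load the fine-tuned weights from the local checkpoint file.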
model = models.mobilenet_v2()
num_ftrs = model.classifier[1].in_features
model.classifier[1] = nn.Linear(num_ftrs, 6)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#model = model.to(device)
model.load_state_dict(torch.load('model_weights_mobilenet_v2_valp1trainp2.pth', map_location=device))
#model.load_state_dict(torch.load('model_weights_mobilenet_v2_valp1trainp2.pth'))
model.eval()  # switch to inference mode so BatchNorm/Dropout layers behave deterministically
#torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
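# Preprocess one PIL image with the standard ImageNet statistics, run a forward pass,
# and return a {class name: probability} dict in the format expected by the Label output.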
def inference(input_image):
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
    # move the input and model to GPU for speed if available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')
    with torch.no_grad():
        output = model(input_batch)
    # The output has unnormalized scores; run a softmax to get probabilities.
    probabilities = torch.nn.functional.softmax(output[0], dim=0)
    # Read the categories
    with open("artist_classes.txt", "r") as f:
        categories = [s.strip() for s in f.readlines()]
    # Rank all 6 classes by confidence; the Label component displays the top 5.
    top_prob, top_catid = torch.topk(probabilities, 6)
    result = {}
    for i in range(top_prob.size(0)):
        result[categories[top_catid[i]]] = top_prob[i].item()
    return result
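# Gradio interface: a PIL image input and a Label output showing the top-5 artist confidences.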
# Recent Gradio releases expose components at the top level (the gr.inputs / gr.outputs
# namespaces were removed), so build the components with gr.Image and gr.Label directly.
inputs = gr.Image(type='pil')
outputs = gr.Label(num_top_classes=5)
title = "MOBILENET V2"
description = "Gradio demo for MOBILENET V2, Efficient networks optimized for speed and memory, with residual blocks. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1801.04381'>MobileNetV2: Inverted Residuals and Linear Bottlenecks</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py'>Github Repo</a></p>"
#examples = [
# ['dog.jpg']
#]
#gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, analytics_enabled=False).launch()
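# Optional quick check without the web UI (a sketch only; "sample_painting.jpg" is a
# hypothetical local file, not part of this repo):
#   print(inference(Image.open("sample_painting.jpg")))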