import torch
from PIL import Image
from torchvision import datasets, models, transforms
import gradio as gr
import os
import torch.nn as nn
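# Gradio demo: classify a painting as the work of one of six artists
# (van Gogh, Monet, Leonardo da Vinci, Rembrandt, Picasso, or Dali)
# using a VGG16 network fine-tuned for six-way classification.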


os.system("wget https://github.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/blob/main/artist_classes.txt")
#os.system("wget https://github.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/blob/main/model_weights_mobilenet_v2_valp1trainp2.pth")

#model = torch.hub.load('pytorch/vision:v0.9.0', 'mobilenet_v2', pretrained=False)
#checkpoint = 'https://github.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/blob/main/model_weights_mobilenet_v2_valp1trainp2.pth'
#model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False))
# Fine-tuned VGG16: swap the final classifier layer for a 6-way output (one class per artist).
model = models.vgg16()
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, 6)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#model = model.to(device)
model.load_state_dict(torch.load('VGG16_weights_May28.pth', map_location=device))
model.eval()  # disable dropout so inference is deterministic

#torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")


def inference(input_image):
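    # Resize the shorter side to 260 px, then center-crop to the 224x224 input VGG16 expects.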
    preprocess = transforms.Compose([
        transforms.Resize(260),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model

    # move the input and model to GPU for speed if available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')
    else:
        model.to('cpu')

    with torch.no_grad():
        output = model(input_batch)
    # The output has unnormalized scores. To get probabilities, you can run a softmax on it.
    probabilities = torch.nn.functional.softmax(output[0], dim=0)

    # Read the categories from the downloaded label file.
    with open("artist_classes.txt", "r") as f:
        categories = [s.strip() for s in f.readlines()]

    # Explicit index-to-artist mapping; this overrides the list read above and is
    # what the demo actually reports.
    categories = {
        0: "vanGogh",
        1: "Monet",
        2: "Leonardo da Vinci",
        3: "Rembrandt",
        4: "Pablo Picasso",
        5: "Salvador Dali"
    }
    
    # Rank all six classes by probability; the Label output displays the top five.
    top_prob, top_catid = torch.topk(probabilities, 6)
    result = {}
    for i in range(top_prob.size(0)):
        result[categories[top_catid[i].item()]] = top_prob[i].item()
    return result

inputs = gr.Image(type='pil')
outputs = gr.Label(num_top_classes=5)

title = "Artist Classifier"
description = "Gradio demo for MOBILENET V2, Efficient networks optimized for speed and memory, with residual blocks. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1801.04381'>MobileNetV2: Inverted Residuals and Linear Bottlenecks</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py'>Github Repo</a></p>"

#examples = [
#            ['dog.jpg']
#]
#gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, analytics_enabled=False).launch()
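# launch() starts a local web server for the demo; pass share=True to launch() for a temporary public link.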