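# Gradio demo: fruit detection (apple / banana / orange) with a Faster R-CNN
# model trained with IceVision; weights are loaded from fasterRCNNFruits.pth.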
from icevision.all import *  # brings in models, tfms, ClassMap, ...
import PIL
import torch
import gradio as gr

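# The fruit classes the detector was trained to recognize.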
class_map = ClassMap(['apple','banana','orange'])

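# Images are first resized to `presize`, then transformed down to `size`.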
presize = 512
size = 384

train_tfms = tfms.A.Adapter(
    [*tfms.A.aug_tfms(size=size, presize=presize), tfms.A.Normalize()]
)
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(size), tfms.A.Normalize()])
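# Note: only valid_tfms (resize-and-pad + normalize) is used at inference
# below; train_tfms is kept here as a record of the training-time pipeline.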


# Rebuild the Faster R-CNN (ResNet-18 FPN backbone) and load the trained
# weights on CPU. num_classes mirrors the training setup: len(class_map)
# plus one extra slot for the background class.
backbone = models.torchvision.faster_rcnn.backbones.resnet18_fpn(pretrained=True)
model1 = models.torchvision.faster_rcnn.model(backbone=backbone, num_classes=len(class_map) + 1)
state_dict = torch.load('fasterRCNNFruits.pth', map_location=torch.device('cpu'))
model1.load_state_dict(state_dict)

def show_preds(input_image, display_label, display_bbox, detection_threshold):
    # Fall back to the 0.5 default if the threshold is left at zero.
    if detection_threshold == 0:
        detection_threshold = 0.5

    img = PIL.Image.fromarray(input_image, 'RGB')

    # end2end_detect applies valid_tfms, runs the model, and draws the
    # predictions onto the image. Pass the existing ClassMap directly
    # (the original re-wrapped it in ClassMap(...), which is incorrect).
    pred_dict = models.torchvision.faster_rcnn.end2end_detect(
        img, valid_tfms, model1, class_map=class_map,
        detection_threshold=detection_threshold,
        display_label=display_label, display_bbox=display_bbox,
        return_img=True, font_size=16, label_color="#FF59D6",
    )

    return pred_dict['img']

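# UI controls, using the legacy Gradio 2.x gr.inputs / gr.outputs namespace.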
# display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display", default=True)
display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)

detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")

outputs = gr.outputs.Image(type="pil")

gr_interface = gr.Interface(
    fn=show_preds,
    inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider],
    outputs=outputs,
    examples=[['mixed_24.jpg', True, True, 0.5], ['mixed_24.jpg', True, True, 0.5]],
)

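# debug=True keeps the process attached and prints errors to the console;
# share=False disables the temporary public gradio.live link.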
gr_interface.launch(inline=False, share=False, debug=True)