Spaces: Runtime error

Commit 5d8a73f
Parent(s): 172f5d3
initial

- app.py +67 -0
- requirements.txt +2 -0
app.py
ADDED
import gradio as gr
import numpy as np
import requests
from torch import nn
from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation

# Download two example images for the demo.
url1 = 'https://cdn.pixabay.com/photo/2014/09/07/21/52/city-438393_1280.jpg'
r = requests.get(url1, allow_redirects=True)
with open("city1.jpg", 'wb') as f:
    f.write(r.content)
url2 = 'https://cdn.pixabay.com/photo/2016/02/19/11/36/canal-1209808_1280.jpg'
r = requests.get(url2, allow_redirects=True)
with open("city2.jpg", 'wb') as f:
    f.write(r.content)

def cityscapes_palette():
    # One RGB color per Cityscapes label (19 classes).
    return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153],
            [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152],
            [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70],
            [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]

model_name = "nvidia/segformer-b5-finetuned-cityscapes-1024-1024"

feature_extractor = SegformerFeatureExtractor.from_pretrained(model_name)
model = SegformerForSemanticSegmentation.from_pretrained(model_name)

def inference(image):
    inputs = feature_extractor(images=image.resize((1024, 1024)), return_tensors="pt")
    outputs = model(**inputs)

    # First, rescale the logits to the original image size.
    logits = nn.functional.interpolate(outputs.logits.detach().cpu(),
                                       size=image.size[::-1],  # (height, width)
                                       mode='bilinear',
                                       align_corners=False)

    # Second, apply argmax over the class dimension to get per-pixel labels.
    seg = logits.argmax(dim=1)[0]
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)  # (height, width, 3)
    palette = np.array(cityscapes_palette())
    for label, color in enumerate(palette):
        color_seg[seg == label, :] = color

    # Blend the input image with the color mask.
    img = np.array(image) * 0.5 + color_seg * 0.5
    img = img.astype(np.uint8)

    # Arrange a 2x2 grid: input and color mask on top, blank tile and blended overlay below.
    merged = np.concatenate((np.concatenate((np.array(image), color_seg), axis=1),
                             np.concatenate((np.zeros_like(image), img), axis=1)), axis=0)
    return merged

title = "Transformers - SegFormer B5 @ 1024px"
description = "Demo for SegFormer. To use it, upload an image or click one of the examples to load it. Read more at the links below.\nModel: nvidia/segformer-b5-finetuned-cityscapes-1024-1024"
article = "<p style='text-align: center'><a href='https://huggingface.co/transformers/model_doc/segformer.html#segformerforsemanticsegmentation'>SegFormer page</a></p>"

gr.Interface(
    inference,
    [gr.inputs.Image(type="pil", label="Input")],
    gr.outputs.Image(type="numpy", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ["city1.jpg"],
        ["city2.jpg"]
    ]).launch()
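For reference, the inference function above can also be exercised outside the Gradio UI, e.g. as a quick local smoke test. This is a minimal sketch, assuming Pillow is installed and the example images above have already been downloaded; the output filename is illustrative:

from PIL import Image

# Load one of the downloaded example images as RGB.
image = Image.open("city1.jpg").convert("RGB")

# Run the model; the result is a uint8 numpy array holding the
# 2x2 grid of input, color mask, and blended overlay.
grid = inference(image)

# Save the grid for visual inspection (output name is arbitrary).
Image.fromarray(grid).save("city1_result.png")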
requirements.txt
ADDED
opencv-python-headless
torch  # imported by app.py (from torch import nn); not installed automatically by transformers
transformers