Spaces: Runtime error

Commit 4049301 · Chris committed
Parent(s): a281d68

First local working version!
Files changed:
- .gitignore +4 -0
- =1.12 +14 -0
- app.py +19 -30
- input_img.jpg +0 -0
- pyvenv.cfg +3 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
+bin
+lib
+output
+share
=1.12 ADDED
@@ -0,0 +1,14 @@
+Requirement already satisfied: xtcocotools in ./lib/python3.10/site-packages (1.14.3)
+Requirement already satisfied: cython>=0.27.3 in ./lib/python3.10/site-packages (from xtcocotools) (3.0.7)
+Requirement already satisfied: numpy>=1.20.0 in ./lib/python3.10/site-packages (from xtcocotools) (1.23.0)
+Requirement already satisfied: matplotlib>=2.1.0 in ./lib/python3.10/site-packages (from xtcocotools) (3.7.4)
+Requirement already satisfied: setuptools>=18.0 in ./lib/python3.10/site-packages (from xtcocotools) (65.5.0)
+Requirement already satisfied: kiwisolver>=1.0.1 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (1.4.5)
+Requirement already satisfied: cycler>=0.10 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (0.12.1)
+Requirement already satisfied: contourpy>=1.0.1 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (1.2.0)
+Requirement already satisfied: pillow>=6.2.0 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (9.4.0)
+Requirement already satisfied: packaging>=20.0 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (23.2)
+Requirement already satisfied: fonttools>=4.22.0 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (4.47.0)
+Requirement already satisfied: python-dateutil>=2.7 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (2.8.2)
+Requirement already satisfied: pyparsing>=2.3.1 in ./lib/python3.10/site-packages (from matplotlib>=2.1.0->xtcocotools) (2.4.5)
+Requirement already satisfied: six>=1.5 in ./lib/python3.10/site-packages (from python-dateutil>=2.7->matplotlib>=2.1.0->xtcocotools) (1.16.0)
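(A note on this file, inferred from its contents rather than stated anywhere in the commit: a file literally named "=1.12" containing pip console output is what an unquoted shell command such as pip install xtcocotools >=1.12 produces, because the shell treats >=1.12 as output redirection. Quoting the requirement, as in pip install "xtcocotools>=1.12", would install the same package without creating this file.)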
app.py CHANGED
@@ -23,34 +23,34 @@ warnings.filterwarnings("ignore")
 mmpose_model_list = ["human", "hand", "face", "animal", "wholebody",
                      "vitpose", "vitpose-s", "vitpose-b", "vitpose-l", "vitpose-h"]
 
-
 def save_image(img, img_path):
     # Convert PIL image to OpenCV image
     img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
     # Save OpenCV image
     cv2.imwrite(img_path, img)
 
-def download_test_image():
-    # Images
-    torch.hub.download_url_to_file(
-        'https://user-images.githubusercontent.com/59380685/266264420-21575a83-4057-41cf-8a4a-b3ea6f332d79.jpg',
-        'bus.jpg')
-    torch.hub.download_url_to_file(
-        'https://user-images.githubusercontent.com/59380685/266264536-82afdf58-6b9a-4568-b9df-551ee72cb6d9.jpg',
-        'dogs.jpg')
-    torch.hub.download_url_to_file(
-        'https://user-images.githubusercontent.com/59380685/266264600-9d0c26ca-8ba6-45f2-b53b-4dc98460c43e.jpg',
-        'zidane.jpg')
+# def download_test_image():
+#     # Images
+#     torch.hub.download_url_to_file(
+#         'https://user-images.githubusercontent.com/59380685/266264420-21575a83-4057-41cf-8a4a-b3ea6f332d79.jpg',
+#         'bus.jpg')
+#     torch.hub.download_url_to_file(
+#         'https://user-images.githubusercontent.com/59380685/266264536-82afdf58-6b9a-4568-b9df-551ee72cb6d9.jpg',
+#         'dogs.jpg')
+#     torch.hub.download_url_to_file(
+#         'https://user-images.githubusercontent.com/59380685/266264600-9d0c26ca-8ba6-45f2-b53b-4dc98460c43e.jpg',
+#         'zidane.jpg')
 
 
-def predict_pose(img, model_name, out_dir):
+def predict_pose(img, model_name):
     img_path = "input_img.jpg"
+    out_dir = "./output";
     save_image(img, img_path)
     device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
     inferencer = MMPoseInferencer(model_name, device=device)
     result_generator = inferencer(img_path, show=False, out_dir=out_dir)
     result = next(result_generator)
+    print(result)
     save_dir = './output/visualizations/'
     if os.path.exists(save_dir):
         out_img_path = save_dir + img_path
@@ -58,25 +58,14 @@ def predict_pose(img, model_name, out_dir):
     else:
         out_img_path = img_path
     out_img = PIL.Image.open(out_img_path)
-    return out_img
+    return (out_img, result)
 
-download_test_image()
+# download_test_image()
 input_image = gr.inputs.Image(type='pil', label="Original Image")
 model_name = gr.inputs.Dropdown(choices=[m for m in mmpose_model_list], label='Model')
-out_dir = gr.inputs.Textbox(label="Output Directory", default="./output")
 output_image = gr.outputs.Image(type="pil", label="Output Image")
+output_text = gr.outputs.Textbox(label="Output Text")
 
-…
-…
-            ['dogs.jpg', 'animal'],
-            ]
-title = "MMPose detection web demo"
-description = "<div align='center'><img src='https://raw.githubusercontent.com/open-mmlab/mmpose/main/resources/mmpose-logo.png' width='450''/><div>" \
-              "<p style='text-align: center'><a href='https://github.com/open-mmlab/mmpose'>MMPose</a> MMPose 是一款基于 PyTorch 的姿态分析的开源工具箱,是 OpenMMLab 项目的成员之一。" \
-              "OpenMMLab Pose Estimation Toolbox and Benchmark..</p>"
-article = "<p style='text-align: center'><a href='https://github.com/open-mmlab/mmpose'>MMPose</a></p>" \
-          "<p style='text-align: center'><a href='https://github.com/isLinXu'>gradio build by gatilin</a></a></p>"
-
-iface = gr.Interface(fn=predict_pose, inputs=[input_image, model_name, out_dir], outputs=output_image,
-                     examples=examples, title=title, description=description, article=article)
+title = "MMPose detection for ShopByShape"
+iface = gr.Interface(fn=predict_pose, inputs=[input_image, model_name], outputs=[output_image, output_text], title=title)
 iface.launch()
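The committed app.py still builds its UI through the legacy gr.inputs / gr.outputs namespaces, which Gradio deprecated in the 3.x series and removed in 4.x. If the Space's "Runtime error" comes from running on a newer Gradio version, a minimal sketch of the same interface against the current component API could look like the following; predict_pose and mmpose_model_list are the names defined in app.py above, and the cause of the error is an assumption, not something recorded in this commit.

import gradio as gr

# Minimal sketch using current Gradio components instead of the removed
# gr.inputs / gr.outputs namespaces; predict_pose and mmpose_model_list
# are assumed to be the objects defined in app.py above.
input_image = gr.Image(type="pil", label="Original Image")
model_name = gr.Dropdown(choices=mmpose_model_list, label="Model")
output_image = gr.Image(type="pil", label="Output Image")
output_text = gr.Textbox(label="Output Text")

iface = gr.Interface(
    fn=predict_pose,
    inputs=[input_image, model_name],
    outputs=[output_image, output_text],
    title="MMPose detection for ShopByShape",
)
iface.launch()

The gr.Interface call itself keeps the same signature; only the component constructors move from gr.inputs.X / gr.outputs.X to gr.X.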
input_img.jpg ADDED
pyvenv.cfg ADDED
@@ -0,0 +1,3 @@
+home = /usr/local/bin
+include-system-site-packages = false
+version = 3.10.11