SeaOtocinclus committed
Commit 5e57aec · verified · 1 Parent(s): 0974126

Update app.py

Fixing various typos

Files changed (1)
app.py +3 -3
app.py CHANGED
@@ -56,7 +56,7 @@ def process_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
     inputs = pose_image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)
 
     # for vitpose-plus-base checkpoint we should additionaly provide dataset_index
-    # to sepcify which MOE experts to use for inference
+    # to specify which MOE experts to use for inference
     if pose_model.config.backbone_config.num_experts > 1:
         dataset_index = torch.tensor([0] * len(inputs["pixel_values"]))
         dataset_index = dataset_index.to(inputs["pixel_values"].device)
@@ -99,10 +99,10 @@ def process_image(image: PIL.Image.Image) -> tuple[PIL.Image.Image, list[dict]]:
 
     annotated_frame = image.copy()
 
-    # annotate boundg boxes
+    # annotate bounding boxes
     annotated_frame = bounding_box_annotator.annotate(scene=image.copy(), detections=detections)
 
-    # annotate edges and verticies
+    # annotate edges and vertices
     annotated_frame = edge_annotator.annotate(scene=annotated_frame, key_points=keypoints)
     return vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints), human_readable_results
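
For context on the first hunk: vitpose-plus checkpoints use a Mixture-of-Experts (MoE) backbone, so inference needs a dataset_index telling the model which expert branch to route each image through. Below is a minimal sketch of that flow, assuming the usyd-community/vitpose-plus-base checkpoint and the transformers VitPose API; the image path and person box are placeholders, and index 0 simply mirrors the value hard-coded in the diff.

import numpy as np
import torch
from PIL import Image
from transformers import AutoProcessor, VitPoseForPoseEstimation

device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = "usyd-community/vitpose-plus-base"  # assumed hub id for the vitpose-plus-base checkpoint

pose_image_processor = AutoProcessor.from_pretrained(checkpoint)
pose_model = VitPoseForPoseEstimation.from_pretrained(checkpoint).to(device)

image = Image.open("person.jpg")                       # placeholder input image
person_boxes = np.array([[25.0, 30.0, 180.0, 400.0]])  # placeholder [x, y, w, h] box from a person detector

inputs = pose_image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)

# MoE routing: one expert index per image in the batch (the app hard-codes 0)
if pose_model.config.backbone_config.num_experts > 1:
    inputs["dataset_index"] = torch.tensor([0] * len(inputs["pixel_values"]), device=device)

with torch.no_grad():
    outputs = pose_model(**inputs)

# one list per image, one dict per detected person, with "keypoints" and "scores"
pose_results = pose_image_processor.post_process_pose_estimation(outputs, boxes=[person_boxes])
keypoints_xy = pose_results[0][0]["keypoints"]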
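
The annotators touched by the second hunk (bounding_box_annotator, edge_annotator, vertex_annotator, with the key_points= keyword) match the supervision library's drawing API. A minimal sketch under that assumption follows; the frame, the box and keypoint values, and the explicit (0, 1) edge list are illustrative placeholders, not the app's real configuration.

import numpy as np
import supervision as sv

bounding_box_annotator = sv.BoxAnnotator()
edge_annotator = sv.EdgeAnnotator(thickness=2, edges=[(0, 1)])  # explicit edges for the 2-point placeholder
vertex_annotator = sv.VertexAnnotator(radius=4)

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder image

# placeholder results; in app.py these come from the person detector and ViTPose
detections = sv.Detections(xyxy=np.array([[50.0, 40.0, 300.0, 460.0]]))
keypoints = sv.KeyPoints(xy=np.array([[[120.0, 90.0], [150.0, 130.0]]], dtype=np.float32))

# same layering as the diff: boxes first, then skeleton edges, then vertices
annotated_frame = bounding_box_annotator.annotate(scene=frame.copy(), detections=detections)
annotated_frame = edge_annotator.annotate(scene=annotated_frame, key_points=keypoints)
annotated_frame = vertex_annotator.annotate(scene=annotated_frame, key_points=keypoints)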