Narendra9009 committed
Commit 6ee506d · verified · 1 Parent(s): 53da0b8

updated the segmentation model to use the Roboflow model for the time being
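For context, this commit swaps the local YOLO segmentation checkpoint for Roboflow's hosted inference API. Below is a minimal sketch of the new call path, based only on the code added in this diff; the image path and the API-key placeholder are hypothetical, and the response is assumed to carry center-format boxes with x, y, width, height keys, which is what the new handling code expects.

from inference_sdk import InferenceHTTPClient

# Hosted segmentation client (replaces the local YOLO("segmentation.pt") model)
client = InferenceHTTPClient(
    api_url="https://outline.roboflow.com",
    api_key="YOUR_ROBOFLOW_API_KEY",  # placeholder; supply your own key
)

# Run the hosted chessboard segmentation model on an image file (path is illustrative)
result = client.infer("board.jpg", model_id="chessboard-segmentation/1")

# Each prediction is assumed to be a center-format box: x, y, width, height
for pred in result.get("predictions", []):
    x, y, w, h = pred["x"], pred["y"], pred["width"], pred["height"]
    x1, y1 = int(x - w / 2), int(y - h / 2)
    x2, y2 = int(x + w / 2), int(y + h / 2)
    print(f"Board bounding box: ({x1}, {y1}) to ({x2}, {y2})")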

Files changed (1): app.py (+57 −8)
app.py CHANGED
@@ -1,3 +1,4 @@
+from inference_sdk import InferenceHTTPClient
 from ultralytics import YOLO
 import cv2
 from stockfish import Stockfish
@@ -5,6 +6,10 @@ import os
 import numpy as np
 import streamlit as st
 
+CLIENT = InferenceHTTPClient(
+    api_url="https://outline.roboflow.com",
+    api_key="9Ez1hwfkqVa2h6pRQQHH"
+)
 
 # Constants
 FEN_MAPPING = {
@@ -91,14 +96,58 @@ def main():
 
     # Load the YOLO models
     model = YOLO("fine_tuned_on_all_data.pt")  # Replace with your trained model weights file
-    seg_model = YOLO("segmentation.pt")
-
-    # Load and process the image
-    img = cv2.imread(temp_file_path)
-    r = seg_model.predict(source=temp_file_path)
-    xyxy = r[0].boxes.xyxy
-    x_min, y_min, x_max, y_max = map(int, xyxy[0])
-    new_img = img[y_min:y_max, x_min:x_max]
+
+
+    # seg_model = YOLO("segmentation.pt")
+
+    # # Load and process the image
+    # img = cv2.imread(temp_file_path)
+    # r = seg_model.predict(source=temp_file_path)
+    # xyxy = r[0].boxes.xyxy
+    # x_min, y_min, x_max, y_max = map(int, xyxy[0])
+    # new_img = img[y_min:y_max, x_min:x_max]
+
+
+
+
+    result = CLIENT.infer(temp_file_path, model_id="chessboard-segmentation/1")
+
+    image = cv2.imread(temp_file_path)
+
+    if image is None:
+        st.write("Error: Image not loaded.")
+
+
+    prediction_data = result
+
+
+    if not prediction_data.get('predictions'):
+        st.write("No board found.")
+        return
+    else:
+        for prediction in prediction_data.get('predictions', []):
+            if 'x' in prediction and 'y' in prediction and 'width' in prediction and 'height' in prediction:
+                x, y, w, h = prediction['x'], prediction['y'], prediction['width'], prediction['height']
+                # print(f"Bounding box coordinates: ({x}, {y}), width={w}, height={h}")
+
+                x1, y1 = int(x - w / 2), int(y - h / 2)
+                x2, y2 = int(x + w / 2), int(y + h / 2)
+
+                src_pts = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]], dtype="float32")
+                # print(f"Source Points: {src_pts}")
+
+                chessboard_size = 600
+                dst_pts = np.array([[0, 0], [chessboard_size - 1, 0], [chessboard_size - 1, chessboard_size - 1], [0, chessboard_size - 1]], dtype="float32")
+                # print(f"Destination Points: {dst_pts}")
+
+                matrix = cv2.getPerspectiveTransform(src_pts, dst_pts)
+
+                # Apply the perspective warp
+                transformed_chessboard = cv2.warpPerspective(image, matrix, (chessboard_size, chessboard_size))
+
+                # Convert images to RGB for display
+                new_img = cv2.cvtColor(transformed_chessboard, cv2.COLOR_BGR2RGB)
+
 
     # Resize the image to 224x224
     image = cv2.resize(new_img, (224, 224))
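For reference, the box-to-crop step in the new hunk can be read as a standalone helper. This is only a sketch under the same assumptions as the committed code (center-format box, 600-pixel square output); the function name crop_board is hypothetical.

import cv2
import numpy as np

def crop_board(image, pred, out_size=600):
    # Convert the center-format box (x, y, width, height) to corner coordinates
    x, y, w, h = pred["x"], pred["y"], pred["width"], pred["height"]
    x1, y1 = int(x - w / 2), int(y - h / 2)
    x2, y2 = int(x + w / 2), int(y + h / 2)

    # Map the four box corners onto a square out_size x out_size canvas
    src_pts = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]], dtype="float32")
    dst_pts = np.array([[0, 0], [out_size - 1, 0],
                        [out_size - 1, out_size - 1], [0, out_size - 1]], dtype="float32")
    matrix = cv2.getPerspectiveTransform(src_pts, dst_pts)
    return cv2.warpPerspective(image, matrix, (out_size, out_size))

Because the four source points form an axis-aligned rectangle, the warp here amounts to a crop plus resize (image[y1:y2, x1:x2] followed by cv2.resize would give essentially the same result); keeping the perspective path means true quadrilateral corners could be substituted later without changing the rest of the pipeline.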