file added
app.py
CHANGED
@@ -15,6 +15,7 @@ import pathlib
 import cv2  # Import OpenCV
 import numpy as np
 
+
 # Pathlib adjustment for Windows compatibility
 # temp = pathlib.PosixPath
 # pathlib.PosixPath = pathlib.WindowsPath
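The commented-out shim in this context is a common workaround for loading a PyTorch/YOLOv5 checkpoint that was pickled with PosixPath objects on Linux/macOS and is later unpickled on Windows: `temp = pathlib.PosixPath` keeps a reference to the original class, and remapping `pathlib.PosixPath` to `pathlib.WindowsPath` avoids the "cannot instantiate 'PosixPath' on your system" error. A minimal sketch of a slightly safer variant, guarded so it only applies on Windows (the platform check is an assumption, not part of this repo's code):

import pathlib
import platform

# Only remap on Windows; on Linux/macOS the checkpoint loads as-is.
if platform.system() == "Windows":
    temp = pathlib.PosixPath          # keep a reference so it can be restored later
    pathlib.PosixPath = pathlib.WindowsPath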
pred.py
CHANGED
@@ -21,10 +21,11 @@ transform = transforms.Compose([
 ])
 im = transform(image).unsqueeze(0)  # Add batch dimension (BCHW)
 
-
-output = model(im)
-print(output)
-
+try:
+    output = model(im)
+    print(output)
+except Exception as e:
+    logger.error(f"Error in image prediction: {e}")
 
 
 # Get predictions
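The new except branch calls `logger.error(...)`, but no logger appears in the hunk's context, so pred.py presumably defines or imports one elsewhere. A minimal sketch of the kind of module-level logger this assumes (the `logging.basicConfig` setup here is an assumption, not taken from the repo; `model` and `im` are the objects already prepared earlier in pred.py):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    output = model(im)   # run inference on the preprocessed batch
    print(output)
except Exception as e:
    logger.error(f"Error in image prediction: {e}")

The commented-out reference code appended at the end of pred.py wraps the same call in torch.no_grad(), which would be a reasonable addition inside the try block as well.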
@@ -48,3 +49,63 @@ if hasattr(model, 'names'):
     class_name = model.names[predicted_class_id]
     print(f"Predicted Class Name: {class_name}")
 
+# import torch
+# import cv2  # Import OpenCV
+# from torchvision import transforms
+# import pathlib
+
+# # Pathlib adjustment for Windows compatibility
+# temp = pathlib.PosixPath
+# pathlib.PosixPath = pathlib.WindowsPath
+
+# # Load pre-trained YOLOv5 model
+# model = torch.hub.load(
+#     r'C:\Users\RESHMA R B\OneDrive\Documents\Desktop\project_without_malayalam\chatbot2\yolov5',
+#     'custom',
+#     path=r"C:\Users\RESHMA R B\OneDrive\Documents\Desktop\project_without_malayalam\chatbot2\models\best.pt",
+#     source="local"
+# )
+
+# # Set model to evaluation mode
+# model.eval()
+
+# # Define image transformations (for PyTorch)
+# transform = transforms.Compose([
+#     transforms.ToTensor(),  # Convert image to tensor
+#     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # Normalize
+# ])
+
+# # Load and preprocess the image using OpenCV
+# img_path = r"C:\Users\RESHMA R B\OneDrive\Documents\Desktop\project_without_malayalam\chatbot2\ACNE.jpg"
+# image = cv2.imread(img_path)  # Load image in BGR format
+# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
+# image_resized = cv2.resize(image, (224, 224))  # Resize to match model's expected input size
+
+# # Transform the image for the model
+# im = transform(image_resized).unsqueeze(0)  # Add batch dimension (BCHW)
+
+# # Get predictions
+# with torch.no_grad():
+#     output = model(im)  # Raw model output (logits)
+
+# # Apply softmax to get confidence scores
+# softmax = torch.nn.Softmax(dim=1)
+# probs = softmax(output)
+
+# # Get the predicted class and its confidence score
+# predicted_class_id = torch.argmax(probs, dim=1).item()
+# confidence_score = probs[0, predicted_class_id].item()
+
+# # Print predicted class and confidence score
+# print(f"Predicted Class ID: {predicted_class_id}")
+# print(f"Confidence Score: {confidence_score:.4f}")
+
+# # Print predicted class name if available
+# if hasattr(model, 'names'):
+#     class_name = model.names[predicted_class_id]
+#     print(f"Predicted Class Name: {class_name}")
+
+
+# cv2.imshow("Input Image", image)
+# cv2.waitKey(0)
+# cv2.destroyAllWindows()