file added
app.py
CHANGED
@@ -198,7 +198,7 @@ def predict_image(image):
 
     if hasattr(model, 'names'):
         class_name = model.names[predicted_class_id]
-        prediction_result = f"Predicted Class: {class_name}\nConfidence: {confidence_score:.4f}"
+        prediction_result = f"Predicted Class: {class_name}\nConfidence Score: {confidence_score:.4f}"
         description = get_description(class_name)
     else:
         prediction_result = f"Predicted Class ID: {predicted_class_id}\nConfidence: {confidence_score:.4f}"
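The only change to app.py is a label tweak: when the model exposes class names, the formatted result now reads "Confidence Score" instead of "Confidence"; the ID-only fallback branch is untouched. A minimal sketch of the two output strings, with stub values standing in for the real model output (the stubs are illustrative; only the f-strings come from the diff):

class_name = "acne"        # stand-in for model.names[predicted_class_id]
predicted_class_id = 3     # stand-in for the argmax class index
confidence_score = 0.9271  # stand-in for the softmax probability

# Branch taken when the model exposes class names (after this change):
print(f"Predicted Class: {class_name}\nConfidence Score: {confidence_score:.4f}")
# -> Predicted Class: acne
#    Confidence Score: 0.9271

# Fallback branch, unchanged by this commit:
print(f"Predicted Class ID: {predicted_class_id}\nConfidence: {confidence_score:.4f}")
# -> Predicted Class ID: 3
#    Confidence: 0.9271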
pred.py
CHANGED
@@ -48,63 +48,3 @@ if hasattr(model, 'names'):
     class_name = model.names[predicted_class_id]
     print(f"Predicted Class Name: {class_name}")
 
-# import torch
-# import cv2  # Import OpenCV
-# from torchvision import transforms
-# import pathlib
-
-# # Pathlib adjustment for Windows compatibility
-# temp = pathlib.PosixPath
-# pathlib.PosixPath = pathlib.WindowsPath
-
-# # Load pre-trained YOLOv5 model
-# model = torch.hub.load(
-#     r'C:\Users\RESHMA R B\OneDrive\Documents\Desktop\project_without_malayalam\chatbot2\yolov5',
-#     'custom',
-#     path=r"C:\Users\RESHMA R B\OneDrive\Documents\Desktop\project_without_malayalam\chatbot2\models\best.pt",
-#     source="local"
-# )
-
-# # Set model to evaluation mode
-# model.eval()
-
-# # Define image transformations (for PyTorch)
-# transform = transforms.Compose([
-#     transforms.ToTensor(),  # Convert image to tensor
-#     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # Normalize
-# ])
-
-# # Load and preprocess the image using OpenCV
-# img_path = r"C:\Users\RESHMA R B\OneDrive\Documents\Desktop\project_without_malayalam\chatbot2\ACNE.jpg"
-# image = cv2.imread(img_path)  # Load image in BGR format
-# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
-# image_resized = cv2.resize(image, (224, 224))  # Resize to match model's expected input size
-
-# # Transform the image for the model
-# im = transform(image_resized).unsqueeze(0)  # Add batch dimension (BCHW)
-
-# # Get predictions
-# with torch.no_grad():
-#     output = model(im)  # Raw model output (logits)
-
-# # Apply softmax to get confidence scores
-# softmax = torch.nn.Softmax(dim=1)
-# probs = softmax(output)
-
-# # Get the predicted class and its confidence score
-# predicted_class_id = torch.argmax(probs, dim=1).item()
-# confidence_score = probs[0, predicted_class_id].item()
-
-# # Print predicted class and confidence score
-# print(f"Predicted Class ID: {predicted_class_id}")
-# print(f"Confidence Score: {confidence_score:.4f}")
-
-# # Print predicted class name if available
-# if hasattr(model, 'names'):
-#     class_name = model.names[predicted_class_id]
-#     print(f"Predicted Class Name: {class_name}")
-
-
-# cv2.imshow("Input Image", image)
-# cv2.waitKey(0)
-# cv2.destroyAllWindows()
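The block removed from pred.py was a fully commented-out smoke test that loaded the custom YOLOv5 classifier from a local checkout and classified a single image; the live script above it is unchanged. For reference, a portable sketch of that same pipeline, with the hard-coded Windows paths replaced by placeholder constants (the placeholders are assumptions, not the project's actual layout):

import cv2
import torch
from torchvision import transforms

YOLOV5_REPO = "path/to/yolov5"  # placeholder: local clone of the YOLOv5 repo
WEIGHTS = "models/best.pt"      # placeholder: custom-trained checkpoint
IMG_PATH = "ACNE.jpg"           # placeholder: sample input image

# Load the custom model from the local YOLOv5 checkout; switch to inference mode.
model = torch.hub.load(YOLOV5_REPO, "custom", path=WEIGHTS, source="local")
model.eval()

# Preprocess with OpenCV: BGR -> RGB, resize to 224x224, normalize, add batch dim.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
image = cv2.cvtColor(cv2.imread(IMG_PATH), cv2.COLOR_BGR2RGB)
im = transform(cv2.resize(image, (224, 224))).unsqueeze(0)

# Forward pass, then softmax over the class logits for a confidence score.
with torch.no_grad():
    probs = torch.nn.functional.softmax(model(im), dim=1)
predicted_class_id = torch.argmax(probs, dim=1).item()
confidence_score = probs[0, predicted_class_id].item()

print(f"Predicted Class ID: {predicted_class_id}")
print(f"Confidence Score: {confidence_score:.4f}")
if hasattr(model, "names"):
    print(f"Predicted Class Name: {model.names[predicted_class_id]}")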