Shilpaj committed
Commit aa63283 · 1 Parent(s): ebbea61

Debug: Prediction issue

Files changed (1): inference.py (+29 −20)
inference.py CHANGED
@@ -23,19 +23,19 @@ def inference(image, alpha, top_k, target_layer, model=None, classes=None):
     Run inference with GradCAM visualization
     """
     try:
-        # Clear CUDA cache before starting
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
 
         device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
+        # Debug: Print model mode
+        print(f"Model mode: {model.training}")
+
         # Ensure model is on correct device and in eval mode
         model = model.to(device)
         model.eval()
 
-        # Process with reduced precision to save memory
         with torch.cuda.amp.autocast():
-            # Save a copy of input img
             org_img = image.copy()
 
             # Convert img to tensor and normalize it
@@ -47,18 +47,32 @@ def inference(image, alpha, top_k, target_layer, model=None, classes=None):
                 )
             ])
 
-            # Preprocess the input image and move to device
+            # Debug: Print image tensor stats
             input_tensor = _transform(image).to(device)
+            print(f"Input tensor shape: {input_tensor.shape}")
+            print(f"Input tensor range: [{input_tensor.min():.2f}, {input_tensor.max():.2f}]")
+
             input_tensor = input_tensor.unsqueeze(0)
             input_tensor.requires_grad = True
 
             # Get Model Predictions
             outputs = model(input_tensor)
+            print(f"Raw output shape: {outputs.shape}")
+            print(f"Raw output range: [{outputs.min():.2f}, {outputs.max():.2f}]")
+
             probabilities = torch.softmax(outputs, dim=1)[0]
-            confidences = {classes[i]: float(probabilities[i]) for i in range(1000)}
+            print(f"Probabilities sum: {probabilities.sum():.2f}")  # Should be close to 1.0
+
+            # Get top 5 predictions for debugging
+            top_probs, top_indices = torch.topk(probabilities, 5)
+            print("\nTop 5 predictions:")
+            for idx, (prob, class_idx) in enumerate(zip(top_probs, top_indices)):
+                class_name = classes[class_idx]
+                print(f"{idx+1}. {class_name}: {prob:.4f}")
 
-            # Select the top classes based on user input
-            sorted_confidences = sorted(confidences.items(), key=lambda val: val[1], reverse=True)
+            # Create confidence dictionary
+            confidences = {classes[i]: float(probabilities[i]) for i in range(len(classes))}
+            sorted_confidences = sorted(confidences.items(), key=lambda x: x[1], reverse=True)
             show_confidences = OrderedDict(sorted_confidences[:top_k])
 
             # Map layer numbers to meaningful parts of the ResNet architecture
@@ -71,41 +85,36 @@ def inference(image, alpha, top_k, target_layer, model=None, classes=None):
                 6: model.layer4[-1]
             }
 
-            # Ensure valid layer selection
             target_layer = min(max(target_layer, 1), 6)
             target_layers = [_layers[target_layer]]
 
-            # Get the class activations from the selected layer
-            cam = GradCAM(model=model, target_layers=target_layers)
+            # Debug: Print selected layer
+            print(f"\nUsing target layer: {target_layers[0]}")
 
+            cam = GradCAM(model=model, target_layers=target_layers)
+
             # Get the most probable class index
             top_class = max(confidences.items(), key=lambda x: x[1])[0]
             class_idx = classes.index(top_class)
+            print(f"\nSelected class for GradCAM: {top_class} (index: {class_idx})")
 
-            # Generate GradCAM for the top predicted class
             grayscale_cam = cam(
                 input_tensor=input_tensor,
                 targets=[ClassifierOutputTarget(class_idx)],
-                aug_smooth=False,   # Disable augmentation for memory efficiency
-                eigen_smooth=False  # Disable eigen smoothing for memory efficiency
+                aug_smooth=False,
+                eigen_smooth=False
             )
             grayscale_cam = grayscale_cam[0, :]
 
-            # Overlay input image with Class activations
             visualization = show_cam_on_image(org_img/255., grayscale_cam, use_rgb=True, image_weight=alpha)
 
-            # Clear CUDA cache after processing
             if torch.cuda.is_available():
                 torch.cuda.empty_cache()
 
             return show_confidences, visualization
 
-    except torch.cuda.OutOfMemoryError:
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-        raise RuntimeError("GPU out of memory - Please try again with a smaller image")
-
     except Exception as e:
+        print(f"Error in inference: {str(e)}")
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
         raise e
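
Not part of the commit, but useful context for the debug output added above: a minimal sketch of how the updated inference() might be exercised locally so the new print statements can be inspected. It assumes a stock torchvision ResNet-50 with an ImageNet-style class list and a local test image; IMAGE_PATH, CLASSES_PATH, and the weight choice are placeholders, since the Space presumably wires in its own model and classes.

# debug_run.py -- illustrative sketch only, not part of this commit.
# Assumes torch, torchvision, numpy, Pillow, and pytorch-grad-cam are installed
# and that inference.py from this repo is importable.
import numpy as np
from PIL import Image
import torchvision.models as models

from inference import inference  # the function modified in this commit

IMAGE_PATH = "test_image.jpg"          # hypothetical local test image
CLASSES_PATH = "imagenet_classes.txt"  # hypothetical file, one class name per line

# Stand-in model and classes; the Space presumably supplies its own trained model
model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V2)
with open(CLASSES_PATH) as f:
    classes = [line.strip() for line in f]

# inference() works on an RGB array and normalizes it internally via _transform
image = np.array(Image.open(IMAGE_PATH).convert("RGB").resize((224, 224)))

confidences, visualization = inference(
    image,
    alpha=0.5,        # blend weight forwarded to show_cam_on_image
    top_k=5,          # size of the returned OrderedDict of confidences
    target_layer=6,   # mapped to model.layer4[-1] by the layer dictionary
    model=model,
    classes=classes,
)

print(confidences)          # top-k confidences (the debug prints appear during the call)
print(visualization.shape)  # GradCAM overlay as an RGB array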