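"""Evaluate cow re-identification embeddings with a FAISS IVF-PQ index.

For each saved feature file, the embeddings are split in half: the first half
is indexed as a gallery and the second half is used as queries. Top-1 and
top-5 retrieval accuracy are printed per file.
"""
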
import glob
import os

import faiss
import numpy as np
import torch

def get_results(features_path):
    """Print top-1/top-5 retrieval accuracy for one saved embedding file."""
    print(features_path)

    # Load the (N, d) embedding matrix and the cow ID for each row.
    # FAISS expects float32, so cast explicitly.
    embeddings_np = torch.load(features_path).numpy().astype(np.float32)
    all_cow_ids = torch.load("../big_model_inference/all_cow_ids.pt").numpy()

    # Split the data in half: the first half is the gallery to be indexed,
    # the second half provides the query embeddings.
    mid_point = len(embeddings_np) // 2
    embeddings_np_first_half = embeddings_np[:mid_point]
    embeddings_np_second_half = embeddings_np[mid_point:]

    all_cow_ids_first_half = all_cow_ids[:mid_point]
    all_cow_ids_second_half = all_cow_ids[mid_point:]

    # IVF-PQ index parameters.
    d = embeddings_np_first_half.shape[1]  # embedding dimensionality (512 here)
    nlist = 100  # number of coarse clusters (tunable)
    m = 8        # number of PQ subquantizers (must divide d)
    nbits = 8    # bits per subquantizer code

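    # Build an IVF-PQ index: the IndexFlatL2 acts as the coarse quantizer that
    # assigns each vector to one of `nlist` clusters, and vectors are stored as
    # m product-quantized codes of `nbits` bits each. At search time only
    # `nprobe` clusters are scanned, trading a little recall for speed.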
    flat_index = faiss.IndexFlatL2(d)
    index_ivf = faiss.IndexIVFPQ(flat_index, d, nlist, m, nbits)
    index_ivf.nprobe = 10  # clusters probed per query

    # Train the quantizers on the gallery half, then add it to the index.
    index_ivf.train(embeddings_np_first_half)
    index_ivf.add(embeddings_np_first_half)

    # Retrieve the k nearest gallery embeddings for every query embedding.
    k = 6  # only the top 5 are used by the metrics below
    distances, indices = index_ivf.search(embeddings_np_second_half, k)

    # Score top-1 and top-5 retrieval accuracy over the query half.
    top1_correct = 0
    top5_correct = 0

    for i, indices_row in enumerate(indices):
        query_id = all_cow_ids_second_half[i]

        # Map retrieved gallery indices back to cow IDs. FAISS pads rows with
        # -1 when a query returns fewer than k neighbours, so skip those.
        retrieved_ids = [all_cow_ids_first_half[idx] for idx in indices_row if idx != -1]

        # Top-1: the closest gallery embedding has the query's cow ID.
        if retrieved_ids and retrieved_ids[0] == query_id:
            top1_correct += 1

        # Top-5: any of the five closest gallery embeddings matches the query ID.
        if query_id in retrieved_ids[:5]:
            top5_correct += 1

    # Calculate accuracy rates
    top1_accuracy = top1_correct / len(embeddings_np_second_half)
    top5_accuracy = top5_correct / len(embeddings_np_second_half)

    print(f"Top-1 Accuracy: {top1_accuracy:.4f}")
    print(f"Top-5 Accuracy: {top5_accuracy:.4f}")
    
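# Evaluate every saved feature file in the directory, skipping the cow-ID tensor.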
directory = '../big_model_inference'  # replace with your directory path
pattern = os.path.join(directory, '*.pt')
exclude_file = 'all_cow_ids.pt'

for features_path in sorted(glob.glob(pattern)):
    if os.path.basename(features_path) != exclude_file:
        get_results(features_path)