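"""Preprocess the movie dataset and compute text embeddings.

Cleans the raw CSV referenced in configs.yaml, builds a short text description
for each movie from its overview, genres, and keywords, then computes and saves
embeddings for every configured Hugging Face model.
"""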
import json
import os

import pandas as pd
import torch
import yaml

from embeddings import compute_embeddings, load_model

# Load configurations
with open("configs.yaml", "r") as file:
    configs = yaml.safe_load(file)
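# Keys read from configs.yaml:
#   dataset            - path to the raw movies CSV
#   processed_dataset  - path where the cleaned CSV is written
#   hf_models          - list of Hugging Face model names to embed with
#   movie_embeddings   - base directory (and file stem) for the saved embedding tensors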

# Load and process the movie dataset
movies_data = pd.read_csv(configs['dataset'])

# Define columns to drop that are not needed
columns_drop = ['budget', 'homepage', 'id', 'original_language', 'original_title', 
                'popularity', 'revenue', 'spoken_languages', 'status', 'tagline']
movies_data.drop(columns=columns_drop, inplace=True)
movies_data.dropna(inplace=True)  # Drop rows with missing values

# Convert JSON string columns to a comma-separated string of names
columns_json_to_csv = ['genres', 'keywords', 'production_companies', 'production_countries']
for col in columns_json_to_csv:
    movies_data[col] = movies_data[col].apply(
        lambda json_str: ', '.join([item["name"] for item in json.loads(json_str)])
    )

# Extract the year from 'release_date'
movies_data['release_date'] = pd.to_datetime(movies_data['release_date']).dt.year

# Convert 'runtime' to integers
movies_data['runtime'] = movies_data['runtime'].astype(int)

# Combine 'overview', 'genres', and 'keywords' into a single string for each movie
movies_data_processed = movies_data[['overview', 'genres', 'keywords']].apply(
    lambda row: '. '.join([f"{col.capitalize()}: {val}" for col, val in row.items()]), 
    axis=1
).tolist()

# Save the processed dataset
movies_data.to_csv(configs['processed_dataset'], index=False)

# Process embeddings for each model
for model_name in configs['hf_models']:
    model, tokenizer = load_model(model_name)
    movie_embeddings = compute_embeddings(movies_data_processed, model, tokenizer)
    
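    # Write embeddings to <movie_embeddings>/<model_name>/<movie_embeddings>.pt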
    embedding_dir_path = os.path.join(configs['movie_embeddings'], model_name)
    embedding_file_path = os.path.join(embedding_dir_path, f"{configs['movie_embeddings']}.pt")
    os.makedirs(embedding_dir_path, exist_ok=True)

    torch.save(movie_embeddings, embedding_file_path)
    print(f"Saved embeddings for {model_name}")