{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "2683899d", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "import matplotlib.pyplot as plt\n", "import pickle\n", "import scipy.io\n", "import cv2\n", "import os" ] }, { "cell_type": "code", "execution_count": 2, "id": "32bd8589", "metadata": { "scrolled": true }, "outputs": [], "source": [ "def load_images_from_folder(folder):\n", " images = []\n", " img_name = []\n", " for filename in os.listdir(folder):\n", " img = cv2.imread(os.path.join(folder,filename))\n", " if img is not None:\n", " images.append(img)\n", " img_name.append(filename)\n", " return images, img_name" ] }, { "cell_type": "code", "execution_count": 3, "id": "c5ebf6a1", "metadata": {}, "outputs": [], "source": [ "imgs, img_name = load_images_from_folder('stimuli')" ] }, { "cell_type": "code", "execution_count": 4, "id": "571c8db2", "metadata": { "scrolled": true }, "outputs": [ { "data": { "text/plain": [ "['67.jpg',\n", " '1587.jpg',\n", " '1458.jpg',\n", " '91.jpg',\n", " '1397.jpg',\n", " '121.jpg',\n", " '1324.jpg',\n", " '1153.jpg',\n", " '1597.jpg',\n", " '1143.jpg',\n", " '1039.jpg',\n", " '1400.jpg',\n", " '1209.jpg',\n", " '75.jpg',\n", " '1480.jpg',\n", " '1093.jpg',\n", " '1267.jpg',\n", " '1227.jpg',\n", " '2.jpg',\n", " '1109.jpg',\n", " '1558.jpg',\n", " '1385.jpg',\n", " '22.jpg',\n", " '56.jpg',\n", " '1118.jpg',\n", " '1078.jpg',\n", " '1532.jpg',\n", " '1416.jpg',\n", " '79.jpg',\n", " '1485.jpg',\n", " '1471.jpg',\n", " '95.jpg',\n", " '20.jpg',\n", " '1042.jpg',\n", " '1262.jpg',\n", " '1288.jpg',\n", " '15.jpg',\n", " '128.jpg',\n", " '72.jpg',\n", " '117.jpg',\n", " '1413.jpg',\n", " '1266.jpg',\n", " '1234.jpg',\n", " '125.jpg',\n", " '1293.jpg',\n", " '147.jpg',\n", " '1557.jpg',\n", " '11.jpg',\n", " '1645.jpg',\n", " '1239.jpg',\n", " '1394.jpg',\n", " '151.jpg',\n", " '010.jpg',\n", " '1559.jpg',\n", " '36.jpg',\n", " '1065.jpg',\n", " '1337.jpg',\n", " '1294.jpg',\n", " '17.jpg',\n", " '143.jpg',\n", " '1022.jpg',\n", " '1016.jpg',\n", " '60.jpg',\n", " '98.jpg',\n", " '1448.jpg',\n", " '1224.jpg',\n", " '139.jpg',\n", " '1149.jpg',\n", " '1455.jpg',\n", " '76.jpg',\n", " '1654.jpg',\n", " '1438.jpg',\n", " '152.jpg',\n", " '1329.jpg',\n", " '1249.jpg',\n", " '1383.jpg',\n", " '1642.jpg',\n", " '102.jpg',\n", " '27.jpg',\n", " '1363.jpg',\n", " '1108.jpg',\n", " '28.jpg',\n", " '1387.jpg',\n", " '1538.jpg',\n", " '1388.jpg',\n", " '138.jpg',\n", " '66.jpg',\n", " '1079.jpg',\n", " '1043.jpg',\n", " '1317.jpg',\n", " '44.jpg',\n", " '1040.jpg',\n", " '103.jpg',\n", " '1644.jpg',\n", " '1120.jpg',\n", " '1125.jpg',\n", " '1392.jpg',\n", " '111.jpg',\n", " '82.jpg',\n", " '85.jpg',\n", " '1627.jpg',\n", " '106.jpg',\n", " '57.jpg',\n", " '1568.jpg',\n", " '1029.jpg',\n", " '1351.jpg',\n", " '1087.jpg',\n", " '83.jpg',\n", " '146.jpg',\n", " '1465.jpg',\n", " '1561.jpg',\n", " '1505.jpg',\n", " '1183.jpg',\n", " '48.jpg',\n", " '1275.jpg',\n", " '1541.jpg',\n", " '1565.jpg',\n", " '1682.jpg',\n", " '123.jpg',\n", " '1647.jpg',\n", " '1523.jpg',\n", " '64.jpg',\n", " '1426.jpg',\n", " '1321.jpg',\n", " '1624.jpg',\n", " '1126.jpg',\n", " '38.jpg',\n", " '1513.jpg',\n", " '141.jpg',\n", " '1304.jpg',\n", " '1367.jpg',\n", " '1618.jpg',\n", " '1669.jpg',\n", " '81.jpg',\n", " '25.jpg',\n", " '1500.jpg',\n", " '1219.jpg',\n", " '1699.jpg',\n", " '149.jpg',\n", " '1487.jpg',\n", " 
'1638.jpg',\n", " '1442.jpg',\n", " '150.jpg',\n", " '148.jpg',\n", " '1382.jpg',\n", " '42.jpg',\n", " '1553.jpg',\n", " '014.jpg',\n", " '1474.jpg',\n", " '97.jpg',\n", " '140.jpg',\n", " '1195.jpg',\n", " '1245.jpg',\n", " '1610.jpg',\n", " '58.jpg',\n", " '127.jpg',\n", " '1516.jpg',\n", " '1353.jpg',\n", " '1184.jpg',\n", " '1358.jpg',\n", " '1160.jpg',\n", " '1015.jpg',\n", " '1449.jpg',\n", " '132.jpg',\n", " '1612.jpg',\n", " '1.jpg',\n", " '1303.jpg',\n", " '1095.jpg',\n", " '1658.jpg',\n", " '008.jpg',\n", " '005.jpg',\n", " '157.jpg',\n", " '1574.jpg',\n", " '1144.jpg',\n", " '112.jpg',\n", " '003.jpg',\n", " '1343.jpg',\n", " '55.jpg',\n", " '154.jpg',\n", " '1178.jpg',\n", " '1673.jpg',\n", " '96.jpg',\n", " '1365.jpg',\n", " '1117.jpg',\n", " '24.jpg',\n", " '1077.jpg',\n", " '1316.jpg',\n", " '1492.jpg',\n", " '1520.jpg',\n", " '1556.jpg',\n", " '1410.jpg',\n", " '159.jpg',\n", " '1210.jpg',\n", " '002.jpg',\n", " '1548.jpg',\n", " '118.jpg',\n", " '1423.jpg',\n", " '1113.jpg',\n", " '1428.jpg',\n", " '1472.jpg',\n", " '87.jpg',\n", " '21.jpg',\n", " '001.jpg',\n", " '1062.jpg',\n", " '1096.jpg',\n", " '1460.jpg',\n", " '1314.jpg',\n", " '1554.jpg',\n", " '1498.jpg',\n", " '1049.jpg',\n", " '1206.jpg',\n", " '119.jpg',\n", " '3.jpg',\n", " '1372.jpg',\n", " '74.jpg',\n", " '007.jpg',\n", " '30.jpg',\n", " '1163.jpg',\n", " '1281.jpg',\n", " '1522.jpg',\n", " '1001.jpg',\n", " '011.jpg',\n", " '108.jpg',\n", " '131.jpg',\n", " '68.jpg',\n", " '1215.jpg',\n", " '1527.jpg',\n", " '122.jpg',\n", " '1698.jpg',\n", " '1690.jpg',\n", " '1083.jpg',\n", " '1092.jpg',\n", " '006.jpg',\n", " '113.jpg',\n", " '80.jpg',\n", " '1347.jpg',\n", " '1499.jpg',\n", " '1018.jpg',\n", " '89.jpg',\n", " '004.jpg',\n", " '78.jpg',\n", " '1443.jpg',\n", " '1094.jpg',\n", " '1131.jpg',\n", " '1476.jpg',\n", " '1483.jpg',\n", " '013.jpg',\n", " '1517.jpg',\n", " '1586.jpg',\n", " '1346.jpg',\n", " '1535.jpg',\n", " '1254.jpg',\n", " '9.jpg',\n", " '009.jpg',\n", " '156.jpg',\n", " '1190.jpg',\n", " '1306.jpg',\n", " '1067.jpg',\n", " '49.jpg',\n", " '115.jpg',\n", " '1151.jpg',\n", " '120.jpg',\n", " '1192.jpg',\n", " '39.jpg',\n", " '84.jpg',\n", " '1510.jpg',\n", " '69.jpg',\n", " '1360.jpg',\n", " '1290.jpg',\n", " '1433.jpg',\n", " '1570.jpg',\n", " '1489.jpg',\n", " '1352.jpg',\n", " '153.jpg',\n", " '133.jpg',\n", " '1469.jpg',\n", " '144.jpg',\n", " '110.jpg',\n", " '145.jpg',\n", " '7.jpg',\n", " '1182.jpg',\n", " '35.jpg',\n", " '1369.jpg',\n", " '1504.jpg',\n", " '70.jpg',\n", " '116.jpg',\n", " '1488.jpg',\n", " '126.jpg',\n", " '1240.jpg',\n", " '1651.jpg',\n", " '012.jpg',\n", " '1186.jpg',\n", " '1393.jpg',\n", " '109.jpg',\n", " '137.jpg',\n", " '1105.jpg',\n", " '1297.jpg',\n", " '1238.jpg',\n", " '50.jpg',\n", " '1589.jpg']" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "img_name" ] }, { "cell_type": "code", "execution_count": 5, "id": "e99e7121", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "300" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(img_name)" ] }, { "cell_type": "code", "execution_count": 6, "id": "0dc9ab34", "metadata": {}, "outputs": [], "source": [ "def load_fix_from_folder(folder):\n", " fix_X = []\n", " fix_Y = []\n", " radius = []\n", " img_name = []\n", " for filename in os.listdir(folder):\n", " fix_X.append(scipy.io.loadmat(os.path.join(folder,filename))['currImData'][:,4])\n", " 
fix_Y.append(scipy.io.loadmat(os.path.join(folder,filename))['currImData'][:,5])\n", " radius.append(scipy.io.loadmat(os.path.join(folder,filename))['currImData'][:,6])\n", " img_name.append(str(scipy.io.loadmat(os.path.join(folder,filename))['currImName'][0][0]) + '.jpg')\n", " #print(filename)\n", " #print(img_name)\n", " return fix_X, fix_Y, radius, img_name" ] }, { "cell_type": "code", "execution_count": 7, "id": "864cb318", "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "def create_folder(folder_path):\n", " try:\n", " os.mkdir(folder_path)\n", " print(f\"Folder '{folder_path}' created successfully.\")\n", " except FileExistsError:\n", " print(f\"Folder '{folder_path}' already exists.\")\n", " except Exception as e:\n", " print(f\"An error occurred: {e}\")" ] }, { "cell_type": "code", "execution_count": 8, "id": "47b06581", "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "def folder_exists(folder_path):\n", " return os.path.exists(folder_path) and os.path.isdir(folder_path)\n", "\n" ] }, { "cell_type": "code", "execution_count": 9, "id": "bb2809f0", "metadata": {}, "outputs": [], "source": [ "def add_circles(matrix, x_list, y_list, r_list):\n", " # increment every cell of the matrix that lies within radius r of each fixation (x, y)\n", " for x, y, r in zip(x_list, y_list, r_list):\n", " x, y, r = int(x), int(y), int(r)\n", " for i in range(max(0, y - r), min(matrix.shape[0], y + r + 1)):\n", " for j in range(max(0, x - r), min(matrix.shape[1], x + r + 1)):\n", " if (i - y) ** 2 + (j - x) ** 2 <= r ** 2:\n", " matrix[i][j] += 1\n", " return matrix" ] }, { "cell_type": "code", "execution_count": null, "id": "d5f1efd1", "metadata": { "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using cache found in /home/pranjul/.cache/torch/hub/pytorch_vision_v0.6.0\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Folder 'DG3_HG_heatmaps/S10_fix' created successfully.\n", "Folder 'DG3_HG_heatmaps/S11_fix' created successfully.\n", "Folder 'DG3_HG_heatmaps/S12_fix' created successfully.\n", "Folder 'DG3_HG_heatmaps/S13_fix' created successfully.\n", "Folder 'DG3_HG_heatmaps/S14_fix' created successfully.\n", "Folder 'DG3_HG_heatmaps/S15_fix' created successfully.\n", "Folder 'DG3_HG_heatmaps/S16_fix' created successfully.\n" ] } ], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI, DeepGazeIIE or DeepGazeIII\n", "model = deepgaze_pytorch.DeepGazeIII(pretrained=True).to(DEVICE)\n", "\n", "#image = face()\n", "\n", "\n", "for q in range(10, 56):\n", " \n", " x = []\n", " \n", " # per-subject fixation folder\n", " folder_path = 'S_fix/S'+ str(q) +'_fix'\n", " \n", " if folder_exists(folder_path):\n", " \n", " fix_X, fix_Y, radius, img_name = load_fix_from_folder('S_fix/S'+ str(q) +'_fix')\n", " \n", " \n", "\n", " # output folder for this subject's heatmaps\n", " folder_path = 'DG3_HG_heatmaps/S'+ str(q) +'_fix'\n", " create_folder(folder_path)\n", "\n", "\n", " for i in range(len(img_name)):\n", "\n", " image = cv2.imread('/home/pranjul/DeepGaze/stimuli/' + img_name[i])\n", "\n", " if image is not None and len(fix_X[i]) > 3 and len(fix_Y[i]) > 3:\n", "\n", " # location of previous scanpath fixations in x and y (pixel coordinates), starting with the initial fixation on the image.\n",
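 " # (assumption) the raw fixations appear to be recorded in screen pixels at 3x the 800x600 analysis grid, hence the /3 rescaling below;\n", " # the /5 scaling of the fixation radius is likewise taken to be an empirical choice.\n", " #fixation_history_x = 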
np.array([1024//2, 300, 500, 200, 200, 700])\n", " #fixation_history_y = np.array([768//2, 300, 100, 300, 100, 500])\n", "\n", " #print(img_name[i])\n", "\n", " fixation_history_x = fix_X[i]/3\n", " #print(fixation_history_x)\n", " fixation_history_y = fix_Y[i]/3\n", " radius_history = radius[i]/5\n", " \n", " #print(fixation_history_x, fixation_history_y, radius_history)\n", " \n", " # Create a 2D matrix filled with zeros of size (600, 800)\n", " matrix_size = (600, 800)\n", " matrix = np.zeros(matrix_size, dtype=int)\n", "\n", " # Call the function to add circles to the matrix\n", " result_matrix = add_circles(matrix, fixation_history_x, fixation_history_y, radius_history)\n", " \n", " #plt.imshow(result_matrix)\n", " #plt.plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " #plt.axis('on')\n", " #plt.colorbar(fraction=0.046, pad=0.04) # Adjust fraction and pad values as needed\n", " #plt.tight_layout()\n", " \n", " # load precomputed centerbias log density (from MIT1003) over a 1024x1024 image\n", " # you can download the centerbias from https://github.com/matthias-k/DeepGaze/releases/download/v1.0.0/centerbias_mit1003.npy\n", " # alternatively, you can use a uniform centerbias via `centerbias_template = np.zeros((1024, 1024))`.\n", " centerbias_template = np.load('centerbias_mit1003.npy')\n", " \n", " # rescale to match image size\n", " centerbias = zoom(centerbias_template, (image.shape[0]/centerbias_template.shape[0], image.shape[1]/centerbias_template.shape[1]), order=0, mode='nearest')\n", " # renormalize log density\n", " centerbias -= logsumexp(centerbias)\n", "\n", " image_tensor = torch.tensor([image.transpose(2, 0, 1)]).to(DEVICE)\n", " centerbias_tensor = torch.tensor([centerbias]).to(DEVICE)\n", " x_hist_tensor = torch.tensor([fixation_history_x[model.included_fixations]]).to(DEVICE)\n", " y_hist_tensor = torch.tensor([fixation_history_y[model.included_fixations]]).to(DEVICE)\n", "\n", " log_density_prediction = model(image_tensor, centerbias_tensor, x_hist_tensor, y_hist_tensor)\n", "\n", " # Scale factor\n", " #scale_factor = 3\n", "\n", " # Calculate the new width and height\n", " #new_width = image.shape[1] * scale_factor\n", " #new_height = image.shape[0] * scale_factor\n", "\n", " # Resize the image using cv2.resize()\n", " #image = cv2.resize(image, (new_width, new_height))\n", "\n", " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", " \n", " x.append((log_density_prediction.detach().cpu().numpy()[0, 0], str(img_name[i].split('.')[0]),\n", " 'S' + str(q), result_matrix))\n", " \n", " \n", " \n", " f, axs = plt.subplots(nrows=1, ncols=3, figsize=(16, 9))\n", " axs[0].imshow(image)\n", " axs[0].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " axs[0].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='white', zorder=100)\n", " axs[0].set_axis_off()\n", " axs[1].matshow(log_density_prediction.detach().cpu().numpy()[0, 0]) # first image in batch, first (and only) channel\n", " axs[1].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " axs[1].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='white', zorder=100)\n", " axs[1].set_axis_off()\n", " axs[2].matshow(result_matrix)\n", " axs[2].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " axs[2].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='white', zorder=100)\n", " axs[2].set_axis_off()\n", " #plt.show()\n", " plt.savefig(os.path.join('DG3_HG_heatmaps/S'+ str(q) +'_fix', img_name[i]))\n", " 
plt.close()\n", " #break\n", " \n", " # Open a file in binary write mode\n", " with open('DG3_HG_heatmaps/S'+ str(q) +'_fix/' + 'S'+ str(q) + '.pkl', 'wb') as file:\n", " pickle.dump(x, file)\n", " \n", " #break\n", " #break" ] }, { "cell_type": "code", "execution_count": null, "id": "a7496b7d", "metadata": {}, "outputs": [], "source": [ "len(x)" ] }, { "cell_type": "code", "execution_count": null, "id": "17e1a970", "metadata": { "scrolled": true }, "outputs": [], "source": [ "x_loaded = {}\n", "\n", "for q in range(1, 5):\n", "\n", " # per-subject fixation folder\n", " folder_path = 'S_fix/S0'+ str(q) +'_fix'\n", " \n", " if folder_exists(folder_path):\n", " # Open the file in binary read mode\n", " with open('DG3_HG_heatmaps/S0'+ str(q) +'_fix/' + 'S0'+ str(q) + '.pkl', 'rb') as file:\n", " x_loaded[q] = pickle.load(file)\n", "\n", "#x_loaded = [x.tolist() for x in x_loaded]\n" ] }, { "cell_type": "code", "execution_count": null, "id": "8eba733e", "metadata": {}, "outputs": [], "source": [ "x_loaded" ] }, { "cell_type": "code", "execution_count": null, "id": "b5c443d6", "metadata": {}, "outputs": [], "source": [ "len(x_loaded)" ] }, { "cell_type": "code", "execution_count": null, "id": "4ea66668", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "84c04b86", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "3e4f31b1", "metadata": {}, "outputs": [], "source": [ "x_loaded_q = []\n", "\n", "for q in range(1, 5):\n", " if q in x_loaded:\n", " print(len(x_loaded[q]))\n", " x_loaded_q.append(x_loaded[q])\n", " " ] }, { "cell_type": "code", "execution_count": null, "id": "93264ea7", "metadata": { "scrolled": true }, "outputs": [], "source": [ "np.shape(np.reshape(x_loaded_q, (849, 4)))" ] }, { "cell_type": "code", "execution_count": null, "id": "7aaf41a1", "metadata": {}, "outputs": [], "source": [ "283*3" ] }, { "cell_type": "code", "execution_count": null, "id": "18f03b5b", "metadata": {}, "outputs": [], "source": [ "plt.matshow(np.reshape(x_loaded_q, (849, 4))[100][0])" ] }, { "cell_type": "code", "execution_count": null, "id": "54701e93", "metadata": {}, "outputs": [], "source": [ "plt.matshow(np.reshape(x_loaded_q, (849, 4))[100][3])" ] }, { "cell_type": "code", "execution_count": null, "id": "8966156a", "metadata": {}, "outputs": [], "source": [ "x_loaded" ] }, { "cell_type": "code", "execution_count": null, "id": "4e2a4d75", "metadata": {}, "outputs": [], "source": [ "x_loaded_q_reshaped = np.reshape(x_loaded_q, (849, 4))" ] }, { "cell_type": "code", "execution_count": null, "id": "45392af9", "metadata": {}, "outputs": [], "source": [ "img.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "93a09086", "metadata": { "scrolled": true }, "outputs": [], "source": [ "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "import matplotlib.pyplot as plt\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI or DeepGazeIIE\n", "model = deepgaze_pytorch.DeepGazeI(pretrained=True).to(DEVICE)\n", "\n", "# image = face()\n", "\n", "x = {}\n", "\n", "# (assumption) image_paths is never defined in this notebook; one plausible definition from the stimuli folder loaded above:\n", "image_paths = [os.path.join('stimuli', n) for n in img_name]\n", "\n", "for i in range(len(image_paths)):\n", " print(i)\n", " \n", " image = cv2.imread(image_paths[i])\n", " \n",
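 " # (note) cv2.imread returns None for unreadable paths; a guard like 'if image is None: continue' would make this loop robust\n", " # load precomputed centerbias log density (from MIT1003) over a 1024x1024 image\n", " # you can download the centerbias from 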
https://github.com/matthias-k/DeepGaze/releases/download/v1.0.0/centerbias_mit1003.npy\n", " # alternatively, you can use a uniform centerbias via `centerbias_template = np.zeros((1024, 1024))`.\n", " centerbias_template = np.load('centerbias_mit1003.npy')\n", " # rescale to match image size\n", " centerbias = zoom(centerbias_template, (image.shape[0]/centerbias_template.shape[0], image.shape[1]/centerbias_template.shape[1]), order=0, mode='nearest')\n", " # renormalize log density\n", " centerbias -= logsumexp(centerbias)\n", "\n", " image_tensor = torch.tensor([image.transpose(2, 0, 1)]).to(DEVICE)\n", " centerbias_tensor = torch.tensor([centerbias]).to(DEVICE)\n", "\n", " log_density_prediction = model(image_tensor, centerbias_tensor)\n", " \n", " #a = log_density_prediction.detach().cpu().numpy()[0, 0]\n", " \n", " #x[img_name[i].split('.')[0]] = a\n", " \n", " \n", " f, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 9))\n", " axs[0].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n", " # axs[0].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " # axs[0].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[0].set_axis_off()\n", " axs[1].matshow(log_density_prediction.detach().cpu().numpy()[0, 0]) # first image in batch, first (and only) channel\n", " # axs[1].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " # axs[1].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[1].set_axis_off()\n", " plt.savefig(os.path.join('DG2_modified_imgs_heatmaps', '{0}.jpg'.format(i)))\n", " \n", " \n", " #break" ] }, { "cell_type": "code", "execution_count": null, "id": "3e4e709a", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "2bd1220a", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "d2f42e76", "metadata": { "scrolled": false }, "outputs": [], "source": [ "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "import matplotlib.pyplot as plt\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI or DeepGazeIIE\n", "model = deepgaze_pytorch.DeepGazeIIE(pretrained=True).to(DEVICE)\n", "\n", "# image = face()\n", "\n", "#x = []\n", "\n", "for i in range(len(imgs)):\n", " \n", " image = imgs[i]\n", " \n", " # load precomputed centerbias log density (from MIT1003) over a 1024x1024 image\n", " # you can download the centerbias from https://github.com/matthias-k/DeepGaze/releases/download/v1.0.0/centerbias_mit1003.npy\n", " # alternatively, you can use a uniform centerbias via `centerbias_template = np.zeros((1024, 1024))`.\n", " centerbias_template = np.load('centerbias_mit1003.npy')\n", " # centerbias_template = np.zeros((1024, 1024))\n", " # rescale to match image size\n", " centerbias = zoom(centerbias_template, (image.shape[0]/centerbias_template.shape[0], image.shape[1]/centerbias_template.shape[1]), order=0, mode='nearest')\n", " # renormalize log density\n", " centerbias -= logsumexp(centerbias)\n", "\n", " image_tensor = torch.tensor([image.transpose(2, 0, 1)]).to(DEVICE)\n", " centerbias_tensor = torch.tensor([centerbias]).to(DEVICE)\n", "\n", " log_density_prediction = model(image_tensor, centerbias_tensor)\n", " \n", " #a = log_density_prediction.detach().cpu().numpy()[0,0]\n", " \n", " #x[a] = 
str(img_name[i].split('.')[0])\n", " \n", " # Inside your loop\n", " x.append((log_density_prediction.detach().cpu().numpy()[0, 0], str(img_name[i].split('.')[0])))\n", "\n", "\n", "\n", " \n", " '''\n", " f, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 9))\n", " axs[0].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n", " # axs[0].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " # axs[0].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[0].set_axis_off()\n", " axs[1].matshow(log_density_prediction.detach().cpu().numpy()[0, 0]) # first image in batch, first (and only) channel\n", " # axs[1].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " # axs[1].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[1].set_axis_off()\n", " # plt.savefig(os.path.join('DG2_heatmaps', '{0}.jpg'.format(i)))\n", " '''\n", " \n", " break" ] }, { "cell_type": "code", "execution_count": null, "id": "fcb41708", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "1ad5624e", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "26527272", "metadata": {}, "outputs": [], "source": [ "import glob\n", "from scipy.io import loadmat\n", "from scipy.stats import pearsonr, spearmanr\n", "from sklearn.preprocessing import MinMaxScaler\n", "\n", "scaler = MinMaxScaler()" ] }, { "cell_type": "code", "execution_count": null, "id": "3938f5cb", "metadata": {}, "outputs": [], "source": [ "\n", "y_faces = {}\n", "\n", "for filename in glob.glob('/home/pranjul/DeepGaze/heatmaps/faces/*.mat'): #assuming gif\n", " \n", " fn=loadmat(filename)\n", " y_faces[filename.split('/')[-1].split('.')[0]] = fn\n", " #break" ] }, { "cell_type": "code", "execution_count": null, "id": "c5902106", "metadata": {}, "outputs": [], "source": [ "\n", "y_objects = {}\n", "\n", "for filename in glob.glob('/home/pranjul/DeepGaze/heatmaps/objects/*.mat'): #assuming gif\n", " \n", " fn=loadmat(filename)\n", " y_objects[filename.split('/')[-1].split('.')[0]] = fn\n", " #break" ] }, { "cell_type": "code", "execution_count": null, "id": "e6fa7c47", "metadata": {}, "outputs": [], "source": [ "\n", "y_pareidolia = {}\n", "\n", "for filename in glob.glob('/home/pranjul/DeepGaze/heatmaps/pareidolia/*.mat'): #assuming gif\n", " \n", " fn=loadmat(filename)\n", " y_pareidolia[filename.split('/')[-1].split('.')[0]] = fn\n", " #break" ] }, { "cell_type": "code", "execution_count": null, "id": "90d31035", "metadata": {}, "outputs": [], "source": [ "y_pareidolia['2']['a']" ] }, { "cell_type": "code", "execution_count": null, "id": "bcf0f6dc", "metadata": {}, "outputs": [], "source": [ "y_pareidolia['2']['a'].shape" ] }, { "cell_type": "code", "execution_count": null, "id": "c416a753", "metadata": {}, "outputs": [], "source": [ "plt.imshow(y_pareidolia['2']['a'])\n", "plt.axis('off')\n", "plt.colorbar(fraction=0.046, pad=0.04) # Adjust fraction and pad values as needed\n", "plt.tight_layout()\n", "#plt.savefig('HG_mars_face.png', dpi=600)" ] }, { "cell_type": "code", "execution_count": null, "id": "f0a6bda6", "metadata": {}, "outputs": [], "source": [ "spearmanr([3,4,5,6,7], [1,2,3,4,5])" ] }, { "cell_type": "code", "execution_count": null, "id": "40524e56", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "5b89081d", "metadata": {}, "outputs": [], "source": [ "ke = []\n", "for k in 
range(len(x_loaded)):\n", " if x_loaded[k][1] in y_faces:\n", " #print(k)\n", " ke.append(k)\n", " " ] }, { "cell_type": "code", "execution_count": null, "id": "a5f6584d", "metadata": {}, "outputs": [], "source": [ "x_loaded[2][3].flatten().shape" ] }, { "cell_type": "code", "execution_count": null, "id": "726784f7", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "aadc4268", "metadata": {}, "outputs": [], "source": [ "len(x)" ] }, { "cell_type": "code", "execution_count": null, "id": "c36eb153", "metadata": {}, "outputs": [], "source": [ "len(ke)" ] }, { "cell_type": "code", "execution_count": null, "id": "a2166932", "metadata": { "scrolled": true }, "outputs": [], "source": [ "#dg_faces = []\n", "#eg_faces = []\n", "ke_faces = []\n", "correlation_coef_faces = []\n", "\n", "for k in range(len(x_loaded_q_reshaped)):\n", " if x_loaded_q_reshaped[k][1] in y_faces:\n", " #print(k)\n", " ke_faces.append(k)\n", " #print(np.shape(x[k]))\n", " #print(y_faces[k])\n", " #dg_faces.append(scaler.fit_transform(np.array(x[k])).flatten())\n", " #eg_faces.append(scaler.fit_transform(np.array(y_faces[k]['a'])).flatten())\n", " correlation_coef_faces.append(spearmanr(x_loaded_q_reshaped[k][0].flatten(),\n", " x_loaded_q_reshaped[k][3].flatten())[0])\n", " #correlation_coef = spearmanr(np.array(dg_faces).flatten(), np.array(eg_faces).flatten())\n", "\n", " #break\n", "\n", "#spearmanr(scaler.fit_transform(cv2.resize(x['1397'], (800, 600))).flatten(), scaler.fit_transform(y_faces['1397']['a']).flatten())[0]\n", "\n", " \n", "# correlation_coef, p_value = spearmanr(np.array(dg_faces).flatten(), np.array(eg_faces).flatten())\n", "# correlation_coef = np.corrcoef(np.array(dg_faces).flatten(), np.array(eg_faces).flatten())\n", "# print(\"Correlation coefficient:\", correlation_coef)\n", "# print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "c0881d58", "metadata": { "scrolled": true }, "outputs": [], "source": [ "correlation_coef_faces" ] }, { "cell_type": "code", "execution_count": null, "id": "723cc7fe", "metadata": {}, "outputs": [], "source": [ "np.mean(correlation_coef_faces)" ] }, { "cell_type": "code", "execution_count": null, "id": "52e9c3ac", "metadata": {}, "outputs": [], "source": [ "np.std(correlation_coef_faces)" ] }, { "cell_type": "code", "execution_count": null, "id": "c187f3a1", "metadata": { "scrolled": true }, "outputs": [], "source": [ "plt.plot(correlation_coef_faces, 'o')" ] }, { "cell_type": "code", "execution_count": null, "id": "70a4ecfb", "metadata": { "scrolled": true }, "outputs": [], "source": [ "len(correlation_coef_faces)" ] }, { "cell_type": "code", "execution_count": null, "id": "77266844", "metadata": { "scrolled": true }, "outputs": [], "source": [ "#dg_objects = []\n", "#eg_objects = []\n", "ke_objects = []\n", "correlation_coef_objects = []\n", "\n", "for k in range(len(x_loaded_q_reshaped)):\n", " if x_loaded_q_reshaped[k][1] in y_objects:\n", " #print(k)\n", " ke_objects.append(k)\n", " #print(np.shape(x[k]))\n", " #print(y_faces[k])\n", " #dg_objects.append(np.array(x[k]).flatten())\n", " #eg_objects.append(np.array(y_objects[k]['a']).flatten())\n", " correlation_coef_objects.append(spearmanr(x_loaded_q_reshaped[k][0].flatten(), \n", " x_loaded_q_reshaped[k][3].flatten())[0])\n", "\n", " #break\n", "\n", "#correlation_coef, p_value = spearmanr(np.array(dg_objects).flatten(), np.array(eg_objects).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "# print(\"Correlation 
coefficient:\", correlation_coef)\n", "# print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "d5f30f29", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "df8da20e", "metadata": {}, "outputs": [], "source": [ "np.mean(correlation_coef_objects)" ] }, { "cell_type": "code", "execution_count": null, "id": "f4c9834d", "metadata": { "scrolled": true }, "outputs": [], "source": [ "np.std(correlation_coef_objects)" ] }, { "cell_type": "code", "execution_count": null, "id": "9465aa0e", "metadata": {}, "outputs": [], "source": [ "plt.plot(correlation_coef_objects, 'o')" ] }, { "cell_type": "code", "execution_count": null, "id": "beb428b2", "metadata": { "scrolled": true }, "outputs": [], "source": [ "correlation_coef_objects" ] }, { "cell_type": "code", "execution_count": null, "id": "802ebc6d", "metadata": {}, "outputs": [], "source": [ "len(correlation_coef_objects)" ] }, { "cell_type": "code", "execution_count": null, "id": "0403de67", "metadata": { "scrolled": true }, "outputs": [], "source": [ "#dg_pareidolia = []\n", "#eg_pareidolia = []\n", "ke_pareidolia = []\n", "correlation_coef_pareidolia = []\n", "\n", "for k in range(len(x_loaded_q_reshaped)):\n", " if x_loaded_q_reshaped[k][1] in y_pareidolia:\n", " #print(k)\n", " ke_pareidolia.append(k)\n", " # print(np.shape(x[k]))\n", " # print(y_faces[k])\n", " # dg_pareidolia.append(scaler.fit_transform(np.array(x[k])).flatten())\n", " # eg_pareidolia.append(scaler.fit_transform(np.array(y_pareidolia[k]['a'])).flatten())\n", " correlation_coef_pareidolia.append(spearmanr(x_loaded_q_reshaped[k][0].flatten(), \n", " x_loaded_q_reshaped[k][3].flatten())[0])\n", " \n", " #break\n", "\n", "# correlation_coef, p_value = spearmanr(np.array(dg_pareidolia).flatten(), np.array(eg_pareidolia).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "# print(\"Correlation coefficient:\", correlation_coef)\n", "# print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "14e6d358", "metadata": {}, "outputs": [], "source": [ "np.mean(correlation_coef_pareidolia)" ] }, { "cell_type": "code", "execution_count": null, "id": "30812b76", "metadata": {}, "outputs": [], "source": [ "np.std(correlation_coef_pareidolia)" ] }, { "cell_type": "code", "execution_count": null, "id": "ab74cd89", "metadata": {}, "outputs": [], "source": [ "plt.plot(correlation_coef_pareidolia, 'o')" ] }, { "cell_type": "code", "execution_count": null, "id": "c4980b11", "metadata": {}, "outputs": [], "source": [ "len(correlation_coef_pareidolia)" ] }, { "cell_type": "code", "execution_count": null, "id": "10f01cdd", "metadata": {}, "outputs": [], "source": [ "303+252+297" ] }, { "cell_type": "code", "execution_count": null, "id": "98aa156c", "metadata": { "scrolled": false }, "outputs": [], "source": [ "import pandas as pd\n", "\n", "# Sample data with different lengths\n", "#correlation_coef_faces = [0.5, 0.6, 0.7]\n", "#correlation_coef_objects = [0.3, 0.4]\n", "#correlation_coef_pareidolia = [0.2, 0.3, 0.1, 0.4]\n", "\n", "# Create a DataFrame with a common index\n", "index = range(max(len(correlation_coef_faces), len(correlation_coef_objects), len(correlation_coef_pareidolia)))\n", "\n", "data = {\n", " 'sr_f': correlation_coef_faces + [None] * (len(index) - len(correlation_coef_faces)),\n", " 'sr_o': correlation_coef_objects + [None] * (len(index) - len(correlation_coef_objects)),\n", " 'sr_p': correlation_coef_pareidolia + [None] * (len(index) - 
len(correlation_coef_pareidolia))\n", "}\n", "\n", "df = pd.DataFrame(data, index=index)\n", "\n", "# Specify the file name\n", "csv_file = 'data.csv'\n", "\n", "# Save DataFrame to CSV file\n", "df.to_csv(csv_file)\n", "\n", "print(f'Data saved to {csv_file}')\n" ] }, { "cell_type": "code", "execution_count": null, "id": "37692ab8", "metadata": {}, "outputs": [], "source": [ "import csv\n", "\n", "# Sample data\n", "data = [\n", " ['Name', 'Age', 'City'],\n", " ['Alice', 28, 'New York'],\n", " ['Bob', 35, 'Los Angeles'],\n", " ['Charlie', 22, 'Chicago']\n", "]\n", "\n", "# Specify the file name\n", "csv_file = 'data.csv'\n", "\n", "# Write data to CSV file\n", "with open(csv_file, mode='w', newline='') as file:\n", " writer = csv.writer(file)\n", " writer.writerows(data)\n", "\n", "print(f'Data saved to {csv_file}')\n" ] }, { "cell_type": "code", "execution_count": null, "id": "f8816fcc", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "1d73414c", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "0e53bda0", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "7fe39537", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "6a51040d", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "89a93508", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "c297d11a", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(dg_faces).flatten(), np.array(eg_faces).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "85e5edb2", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(dg_objects).flatten(), np.array(eg_objects).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "d99c7309", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(dg_pareidolia).flatten(), np.array(eg_pareidolia).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "e9702314", "metadata": {}, "outputs": [], "source": [ "len(dg_pareidolia)" ] }, { "cell_type": "code", "execution_count": null, "id": "c319f00b", "metadata": {}, "outputs": [], "source": [ "len(dg_faces[:83])" ] }, { "cell_type": "code", "execution_count": null, "id": "1fa45e93", "metadata": {}, "outputs": [], "source": [ "len(dg_objects[:83])" ] }, { "cell_type": "code", "execution_count": null, "id": "5e9fd2b6", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(dg_faces[:83]).flatten(), np.array(dg_objects[:83]).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "a9357f4f", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(dg_faces[:83]).flatten(), 
np.array(dg_pareidolia[:83]).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "f70021f3", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(dg_pareidolia[:83]).flatten(), np.array(dg_objects[:83]).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "0df004a4", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(eg_pareidolia[:83]).flatten(), np.array(eg_objects[:83]).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "1742ecb1", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(eg_pareidolia[:83]).flatten(), np.array(eg_faces[:83]).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "0a02bf88", "metadata": {}, "outputs": [], "source": [ "correlation_coef, p_value = spearmanr(np.array(eg_faces[:83]).flatten(), np.array(eg_objects[:83]).flatten())\n", "# correlation_coef = np.corrcoef(a, b)\n", "print(\"Correlation coefficient:\", correlation_coef)\n", "print(\"p-value:\", p_value)" ] }, { "cell_type": "code", "execution_count": null, "id": "b7ccd13e", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "fdeb2fc5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "7faa17c1", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f9af3d41", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from scipy.stats import spearmanr\n", "\n", "# Generate two arrays with random data\n", "array1 = np.random.rand(100)\n", "array2 = np.random.rand(100)\n", "\n", "# Calculate Spearman's correlation coefficient and p-value\n", "correlation, p_value = spearmanr(array1, array2)\n", "\n", "print(\"Spearman's correlation coefficient:\", correlation)\n", "print(\"p-value:\", p_value)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "f7cd0d61", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "3570f454", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "3a0a92be", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from scipy.stats import pearsonr\n", "\n", "# define two eye gaze heatmaps\n", "heatmap1 = np.array([[0.2, 0.3, 0.1],\n", " [0.1, 0.4, 0.3],\n", " [0.3, 0.2, 0.1]])\n", "\n", "heatmap2 = np.array([[0.1, 0.2, 0.3],\n", " [0.2, 0.3, 0.2],\n", " [0.3, 0.1, 0.1]])\n", "\n", "# flatten the heatmaps into 1D arrays\n", "flat_heatmap1 = heatmap1.flatten()\n", "flat_heatmap2 = heatmap2.flatten()\n", "\n", "# calculate the Pearson correlation coefficient and p-value\n", "corr, p_value = pearsonr(flat_heatmap1, flat_heatmap2)\n", "\n", "print(\"Correlation coefficient:\", corr)\n", "print(\"p-value:\", p_value)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "98a8e3c1", 
"metadata": {}, "outputs": [], "source": [ "np.shape(b)" ] }, { "cell_type": "code", "execution_count": null, "id": "55b352bd", "metadata": {}, "outputs": [], "source": [ "np.shape(a)" ] }, { "cell_type": "code", "execution_count": null, "id": "3fe648aa", "metadata": {}, "outputs": [], "source": [ "np.shape(correlation_coef)" ] }, { "cell_type": "code", "execution_count": null, "id": "cd8e091b", "metadata": {}, "outputs": [], "source": [ "plt.imshow(correlation_coef)" ] }, { "cell_type": "code", "execution_count": null, "id": "884bf73a", "metadata": {}, "outputs": [], "source": [ "correlation_coef[83:, :83]" ] }, { "cell_type": "code", "execution_count": null, "id": "4a540fa9", "metadata": { "scrolled": true }, "outputs": [], "source": [ "plt.imshow(correlation_coef[83:, :83])" ] }, { "cell_type": "code", "execution_count": null, "id": "62cadea8", "metadata": {}, "outputs": [], "source": [ "plt.plot(np.diagonal(correlation_coef[100:, :100]), 'o') #faces" ] }, { "cell_type": "code", "execution_count": null, "id": "f05bd895", "metadata": {}, "outputs": [], "source": [ "np.mean(np.diagonal(correlation_coef[100:, :100]))" ] }, { "cell_type": "code", "execution_count": null, "id": "6e227007", "metadata": {}, "outputs": [], "source": [ "plt.plot(np.diagonal(correlation_coef[100:, :100]), 'o') #obj" ] }, { "cell_type": "code", "execution_count": null, "id": "12ed25e7", "metadata": {}, "outputs": [], "source": [ "np.mean(np.diagonal(correlation_coef[86:, :86]))" ] }, { "cell_type": "code", "execution_count": null, "id": "ccf0a569", "metadata": {}, "outputs": [], "source": [ "plt.plot(np.diagonal(correlation_coef[83:, :83]), 'o') #pare" ] }, { "cell_type": "code", "execution_count": null, "id": "923ed911", "metadata": {}, "outputs": [], "source": [ "np.mean(np.diagonal(correlation_coef[83:, :83]))" ] }, { "cell_type": "code", "execution_count": null, "id": "27bac165", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f751bc29", "metadata": {}, "outputs": [], "source": [ "plt.imshow(y_objects['1153']['a'])" ] }, { "cell_type": "code", "execution_count": null, "id": "0c6a4bb4", "metadata": {}, "outputs": [], "source": [ "y_faces" ] }, { "cell_type": "code", "execution_count": null, "id": "40a93053", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "4a9b5849", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "76eee42b", "metadata": {}, "outputs": [], "source": [ "np.shape(imgs)" ] }, { "cell_type": "code", "execution_count": null, "id": "1cc44e0e", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "1d14b8ad", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "feddeb52", "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI or DeepGazeIIE\n", "model = deepgaze_pytorch.DeepGazeIII(pretrained=True).to(DEVICE)\n", "\n", "image = face()\n", "\n", "# location of previous scanpath fixations in x and y (pixel coordinates), starting with the initial fixation on the image.\n", "fixation_history_x = np.array([1024//2, 300, 500, 200, 200, 700])\n", "fixation_history_y = np.array([768//2, 300, 
100, 300, 100, 500])\n", "\n", "# load precomputed centerbias log density (from MIT1003) over a 1024x1024 image\n", "# you can download the centerbias from https://github.com/matthias-k/DeepGaze/releases/download/v1.0.0/centerbias_mit1003.npy\n", "# alternatively, you can use a uniform centerbias via `centerbias_template = np.zeros((1024, 1024))`.\n", "centerbias_template = np.load('centerbias_mit1003.npy')\n", "# rescale to match image size\n", "centerbias = zoom(centerbias_template, (image.shape[0]/centerbias_template.shape[0], image.shape[1]/centerbias_template.shape[1]), order=0, mode='nearest')\n", "# renormalize log density\n", "centerbias -= logsumexp(centerbias)\n", "\n", "image_tensor = torch.tensor([image.transpose(2, 0, 1)]).to(DEVICE)\n", "centerbias_tensor = torch.tensor([centerbias]).to(DEVICE)\n", "x_hist_tensor = torch.tensor([fixation_history_x[model.included_fixations]]).to(DEVICE)\n", "y_hist_tensor = torch.tensor([fixation_history_y[model.included_fixations]]).to(DEVICE)\n", "\n", "log_density_prediction = model(image_tensor, centerbias_tensor, x_hist_tensor, y_hist_tensor)\n", "\n", "f, axs = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n", "axs[0].imshow(image)\n", "axs[0].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", "axs[0].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", "axs[0].set_axis_off()\n", "axs[1].matshow(log_density_prediction.detach().cpu().numpy()[0, 0]) # first image in batch, first (and only) channel\n", "axs[1].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", "axs[1].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", "axs[1].set_axis_off()" ] }, { "cell_type": "code", "execution_count": null, "id": "2b512963", "metadata": {}, "outputs": [], "source": [ "model.included_fixations" ] }, { "cell_type": "code", "execution_count": null, "id": "33d6872d", "metadata": {}, "outputs": [], "source": [ "fixation_history_x" ] }, { "cell_type": "code", "execution_count": null, "id": "8bce1d25", "metadata": {}, "outputs": [], "source": [ "fixation_history_x[model.included_fixations]" ] }, { "cell_type": "code", "execution_count": null, "id": "751cb04e", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "b3160caa", "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI, DeepGazeIIE or DeepGazeIII\n", "model = deepgaze_pytorch.DeepGazeIII(pretrained=True).to(DEVICE)\n", "\n", "#image = face()\n", "\n", "x = {}\n", "\n", "for i in range(len(imgs)):\n", " \n", " image = imgs[i]\n", " \n", " # location of previous scanpath fixations in x and y (pixel coordinates), starting with the initial fixation on the image.\n", " fixation_history_x = np.array([1024//2, 300, 500, 200, 200, 700])\n", " fixation_history_y = np.array([768//2, 300, 100, 300, 100, 500])\n", "\n", " # load precomputed centerbias log density (from MIT1003) over a 1024x1024 image\n", " # you can download the centerbias from https://github.com/matthias-k/DeepGaze/releases/download/v1.0.0/centerbias_mit1003.npy\n", " # alternatively, you can use a uniform centerbias via `centerbias_template = np.zeros((1024, 1024))`.\n", " centerbias_template = np.load('centerbias_mit1003.npy')\n",
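 " # note: the centerbias template could be loaded once outside the loop; it is re-read per image here only to keep the cell self-contained\n",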
" # rescale to match image size\n", " centerbias = zoom(centerbias_template, (image.shape[0]/centerbias_template.shape[0], image.shape[1]/centerbias_template.shape[1]), order=0, mode='nearest')\n", " # renormalize log density\n", " centerbias -= logsumexp(centerbias)\n", "\n", " image_tensor = torch.tensor([image.transpose(2, 0, 1)]).to(DEVICE)\n", " centerbias_tensor = torch.tensor([centerbias]).to(DEVICE)\n", " x_hist_tensor = torch.tensor([fixation_history_x[model.included_fixations]]).to(DEVICE)\n", " y_hist_tensor = torch.tensor([fixation_history_x[model.included_fixations]]).to(DEVICE)\n", "\n", " log_density_prediction = model(image_tensor, centerbias_tensor, x_hist_tensor, y_hist_tensor)\n", "\n", " f, axs = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n", " axs[0].imshow(image)\n", " axs[0].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " axs[0].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[0].set_axis_off()\n", " axs[1].matshow(log_density_prediction.detach().cpu().numpy()[0, 0]) # first image in batch, first (and only) channel\n", " axs[1].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " axs[1].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[1].set_axis_off()" ] }, { "cell_type": "code", "execution_count": null, "id": "aa2d7d4e", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "274b461a", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f71d7915", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "6c4adce6", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "import matplotlib.pyplot as plt\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI or DeepGazeIIE\n", "model = deepgaze_pytorch.DeepGazeIIE(pretrained=True).to(DEVICE)\n", "\n", "# image = face()\n", "\n", "x = {}\n", "\n", "for i in range(len(imgs)):\n", " \n", " image = imgs[i]\n", " \n", " # load precomputed centerbias log density (from MIT1003) over a 1024x1024 image\n", " # you can download the centerbias from https://github.com/matthias-k/DeepGaze/releases/download/v1.0.0/centerbias_mit1003.npy\n", " # alternatively, you can use a uniform centerbias via `centerbias_template = np.zeros((1024, 1024))`.\n", " centerbias_template = np.load('centerbias_mit1003.npy')\n", " # rescale to match image size\n", " centerbias = zoom(centerbias_template, (image.shape[0]/centerbias_template.shape[0], image.shape[1]/centerbias_template.shape[1]), order=0, mode='nearest')\n", " # renormalize log density\n", " centerbias -= logsumexp(centerbias)\n", "\n", " image_tensor = torch.tensor([image.transpose(2, 0, 1)]).to(DEVICE)\n", " centerbias_tensor = torch.tensor([centerbias]).to(DEVICE)\n", "\n", " log_density_prediction = model(image_tensor, centerbias_tensor)\n", " \n", " a = log_density_prediction.detach().cpu().numpy()[0, 0]\n", " \n", " x[img_name[i].split('.')[0]] = a\n", " \n", " '''\n", " f, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 9))\n", " axs[0].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n", " # axs[0].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " # axs[0].scatter(fixation_history_x[-1], 
fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[0].set_axis_off()\n", " axs[1].matshow(log_density_prediction.detach().cpu().numpy()[0, 0]) # first image in batch, first (and only) channel\n", " # axs[1].plot(fixation_history_x, fixation_history_y, 'o-', color='red')\n", " # axs[1].scatter(fixation_history_x[-1], fixation_history_y[-1], 100, color='yellow', zorder=100)\n", " axs[1].set_axis_off()\n", " # plt.savefig(os.path.join('DG2_heatmaps', '{0}.jpg'.format(i)))\n", " '''\n", " \n", " #break" ] }, { "cell_type": "code", "execution_count": null, "id": "eca95def", "metadata": {}, "outputs": [], "source": [ "image" ] }, { "cell_type": "code", "execution_count": null, "id": "d69ce384", "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from scipy.misc import face\n", "from scipy.ndimage import zoom\n", "from scipy.special import logsumexp\n", "import torch\n", "\n", "import deepgaze_pytorch\n", "\n", "DEVICE = 'cuda'\n", "\n", "# you can use DeepGazeI or DeepGazeIIE\n", "model = deepgaze_pytorch.DeepGazeI(pretrained=True).to(DEVICE)" ] }, { "cell_type": "code", "execution_count": null, "id": "c8207585", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "b9d406ff", "metadata": { "scrolled": true }, "outputs": [], "source": [ "%%capture captured_output\n", "# Your code here\n", "print(model)" ] }, { "cell_type": "code", "execution_count": null, "id": "984c0e9c", "metadata": { "scrolled": true }, "outputs": [], "source": [ "with open(\"DG1_arch.txt\", \"w\") as f:\n", " f.write(captured_output.stdout)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "6d170109", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.5" } }, "nbformat": 4, "nbformat_minor": 5 }