diff --git a/.gitattributes b/.gitattributes index 283fbfa5f5b59c93d9e4e77879c65a409bbe2afc..b0630cc8529b839158d73672b83e582bff22f03c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -35,3 +35,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text *.mp4 filter=lfs diff=lfs merge=lfs -text *.png filter=lfs diff=lfs merge=lfs -text +thirdparty/Metric3D/media/gifs/demo_1.gif filter=lfs diff=lfs merge=lfs -text +thirdparty/Metric3D/training/kitti_json_files/eigen_train.json filter=lfs diff=lfs merge=lfs -text +*.gif filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d66ad568ab667d3ab129bd06bddbdea23f57cda9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,73 @@ +# Project specific data and submodule + +/example/video_0 +/weights + +.vscode/ +**/.DS_Store +data/pretrain/*.pth +data/pretrain/*.pth.tar +data/smpl/SMPL_*.pkl +*.mov +example_video/ +/thirdparty/detection +experiments/ +logs/ +hot3d_*/ +File/ +thirdparty/ZoeDepth +eval_vis_mdslam/ +eval_vis_*/ +pred_vis/ +_DATA.zip +train_ddp_process* +logs* +/*_trainset_export/ +/*.png +/*.zip +/dataset_tars/ +/dataset_untars/ +/datasets/ +/eval_log*/ +*.pth +*.pkl +/dataset*/ +/eval*/ +/thirdparty/aitviewer + +# Byte-compiled / optimized / DLL files +__pycache__/ + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# Jupyter Notebook +.ipynb_checkpoints +*.ipynb + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +vis.mp4 +imgui.ini diff --git a/app.py b/app.py index c1941d9d66bd72a659e49a0eb4bc89c6f6f6cf4c..a581a3d6bf2c9596fb0d148b45f931eb7068451f 100644 --- a/app.py +++ b/app.py @@ -121,7 +121,7 @@ header = (''' Jinglei Zhang1, Jiankang Deng2,
- Chao Ma1 + Chao Ma1, Rolandos Alexandros Potamias2

diff --git a/thirdparty/DROID-SLAM/.gitignore b/thirdparty/DROID-SLAM/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a180f698f4396a8fd6323c75ffe4b1867eff1d85 --- /dev/null +++ b/thirdparty/DROID-SLAM/.gitignore @@ -0,0 +1,158 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + + + +__pycache__ +build +dist +*.egg-info +*.vscode/ +*.pth +tests +checkpoints +datasets +runs +cache +*.out +*.o +data +figures/*.pdf + + diff --git a/thirdparty/DROID-SLAM/.gitmodules b/thirdparty/DROID-SLAM/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..d47acde92d97b1012ceacb1e37136ad293cec25e --- /dev/null +++ b/thirdparty/DROID-SLAM/.gitmodules @@ -0,0 +1,6 @@ +[submodule "thirdparty/lietorch"] + path = thirdparty/lietorch + url = https://github.com/princeton-vl/lietorch +[submodule "thirdparty/eigen"] + path = thirdparty/eigen + url = https://gitlab.com/libeigen/eigen.git diff --git a/thirdparty/DROID-SLAM/LICENSE b/thirdparty/DROID-SLAM/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cc1e23bde2b18bbab3569b0dd4d8677ac3babedb --- /dev/null +++ b/thirdparty/DROID-SLAM/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2021, Princeton Vision & Learning Lab +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/thirdparty/DROID-SLAM/README.md b/thirdparty/DROID-SLAM/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dc73a219e51f1c0912fdca4b54b61dcdc80d01ba --- /dev/null +++ b/thirdparty/DROID-SLAM/README.md @@ -0,0 +1,139 @@ +# DROID-SLAM + + + + + +[![IMAGE ALT TEXT HERE](misc/screenshot.png)](https://www.youtube.com/watch?v=GG78CSlSHSA) + + + +[DROID-SLAM: Deep Visual SLAM for Monocular, Stereo, and RGB-D Cameras](https://arxiv.org/abs/2108.10869) +Zachary Teed and Jia Deng + +``` +@article{teed2021droid, + title={{DROID-SLAM: Deep Visual SLAM for Monocular, Stereo, and RGB-D Cameras}}, + author={Teed, Zachary and Deng, Jia}, + journal={Advances in neural information processing systems}, + year={2021} +} +``` + +**Initial Code Release:** This repo currently provides a single-GPU implementation of our monocular, stereo, and RGB-D SLAM systems, together with demo, training, and evaluation scripts. + + +## Requirements + +To run the code you will need: +* **Inference:** Running the demos will require a GPU with at least 11G of memory. + +* **Training:** Training requires a GPU with at least 24G of memory. We train on 4 x RTX-3090 GPUs. + +## Getting Started +1. Clone the repo using the `--recursive` flag +```Bash +git clone --recursive https://github.com/princeton-vl/DROID-SLAM.git +``` + +2. Create a new anaconda environment using the provided .yaml file. Use `environment_novis.yaml` if you do not want to use the visualization. +```Bash +conda env create -f environment.yaml +pip install evo --upgrade --no-binary evo +pip install gdown +``` + +3. Compile the extensions (takes about 10 minutes) +```Bash +python setup.py install +``` + + +## Demos + +1. Download the model from Google Drive: [droid.pth](https://drive.google.com/file/d/1PpqVt1H4maBa_GbPJp4NwxRsd9jk-elh/view?usp=sharing) + +2. Download some sample videos using the provided script. +```Bash +./tools/download_sample_data.sh +``` + +Run the demo on any of the samples (all demos can be run on a GPU with 11G of memory). While running, press the "s" key to increase the filtering threshold (= more points) and "a" to decrease the filtering threshold (= fewer points). 
To save the reconstruction with full-resolution depth maps, use the `--reconstruction_path` flag. + + +```Python +python demo.py --imagedir=data/abandonedfactory --calib=calib/tartan.txt --stride=2 +``` + +```Python +python demo.py --imagedir=data/sfm_bench/rgb --calib=calib/eth.txt +``` + +```Python +python demo.py --imagedir=data/Barn --calib=calib/barn.txt --stride=1 --backend_nms=4 +``` + +```Python +python demo.py --imagedir=data/mav0/cam0/data --calib=calib/euroc.txt --t0=150 +``` + +```Python +python demo.py --imagedir=data/rgbd_dataset_freiburg3_cabinet/rgb --calib=calib/tum3.txt +``` + + +**Running on your own data:** All you need is a calibration file. Calibration files are in the form +``` +fx fy cx cy [k1 k2 p1 p2 [ k3 [ k4 k5 k6 ]]] +``` +with the parameters in brackets optional (an example calibration file is given at the end of this README). + +## Evaluation +We provide evaluation scripts for TartanAir, EuRoC, and TUM. EuRoC and TUM can be run on a 1080Ti. The TartanAir and ETH3D evaluations will require 24G of memory. + +### TartanAir (Mono + Stereo) +Download the [TartanAir](https://theairlab.org/tartanair-dataset/) dataset using the script `thirdparty/tartanair_tools/download_training.py` and put the sequences in `datasets/TartanAir` +```Bash +./tools/validate_tartanair.sh --plot_curve # monocular eval +./tools/validate_tartanair.sh --plot_curve --stereo # stereo eval +``` + +### EuRoC (Mono + Stereo) +Download the [EuRoC](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) sequences (ASL format) and put them in `datasets/EuRoC` +```Bash +./tools/evaluate_euroc.sh # monocular eval +./tools/evaluate_euroc.sh --stereo # stereo eval +``` + +### TUM-RGBD (Mono) +Download the fr1 sequences from [TUM-RGBD](https://vision.in.tum.de/data/datasets/rgbd-dataset/download) and put them in `datasets/TUM-RGBD` +```Bash +./tools/evaluate_tum.sh # monocular eval +``` + +### ETH3D (RGB-D) +Download the [ETH3D](https://www.eth3d.net/slam_datasets) dataset +```Bash +./tools/evaluate_eth3d.sh # RGB-D eval +``` + +## Training + +First download the TartanAir dataset. The download script can be found in `thirdparty/tartanair_tools/download_training.py`. You will only need the `rgb` and `depth` data. + +``` +python download_training.py --rgb --depth +``` + +You can then run the training script. We train on 4 x RTX-3090 GPUs, which takes approximately 1 week. If you use a different number of GPUs, adjust the learning rate accordingly. + +**Note:** On the first training run, covisibility is computed between all pairs of frames. This can take several hours, but the results are cached so that future training runs will start immediately. + + +``` +python train.py --datapath=<path to TartanAir> --gpus=4 --lr=0.00025 +``` + + +## Acknowledgements +Data from [TartanAir](https://theairlab.org/tartanair-dataset/) was used to train our model. We additionally use evaluation tools from [evo](https://github.com/MichaelGrupp/evo) and [tartanair_tools](https://github.com/castacks/tartanair_tools). 
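+**Example calibration file:** As a minimal sketch of the calibration format above, a pinhole-only camera (fx fy cx cy, no distortion) needs just a single line; the values below are hypothetical placeholders for a 640x480 camera, not measured intrinsics: +``` +525.0 525.0 319.5 239.5 +``` +A camera with radial/tangential distortion would append the optional coefficients in the same order, e.g. `525.0 525.0 319.5 239.5 0.1 -0.05 0.001 0.001`. 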
diff --git a/thirdparty/DROID-SLAM/demo.py b/thirdparty/DROID-SLAM/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..4e059ac8f5b5eb05bcb594ef82cc1bb18b9fc4bc --- /dev/null +++ b/thirdparty/DROID-SLAM/demo.py @@ -0,0 +1,135 @@ +import sys +sys.path.append('droid_slam') + +from tqdm import tqdm +import numpy as np +import torch +import lietorch +import cv2 +import os +import glob +import time +import argparse + +from torch.multiprocessing import Process +from droid import Droid + +from pycocotools import mask as masktool +import torch.nn.functional as F + + +def show_image(image): + image = image.permute(1, 2, 0).cpu().numpy() + cv2.imshow('image', image / 255.0) + cv2.waitKey(1) + +def image_stream(imagedir, calib, stride): + """ image generator """ + # calib = np.loadtxt(calib, delimiter=" ") + fx, fy, cx, cy = calib[:4] + + K = np.eye(3) + K[0,0] = fx + K[0,2] = cx + K[1,1] = fy + K[1,2] = cy + + image_list = sorted(glob.glob(f'{imagedir}/*.jpg')) + image_list = image_list[::stride] + + for t, imfile in enumerate(image_list): + image = cv2.imread(imfile) + if len(calib) > 4: + image = cv2.undistort(image, K, calib[4:]) + + h0, w0, _ = image.shape + h1 = int(h0 * np.sqrt((384 * 512) / (h0 * w0))) + w1 = int(w0 * np.sqrt((384 * 512) / (h0 * w0))) + + image = cv2.resize(image, (w1, h1)) + image = image[:h1-h1%8, :w1-w1%8] + image = torch.as_tensor(image).permute(2, 0, 1) + + intrinsics = torch.as_tensor([fx, fy, cx, cy]) + intrinsics[0::2] *= (w1 / w0) + intrinsics[1::2] *= (h1 / h0) + + yield t, image[None], intrinsics + + +def save_reconstruction(droid, reconstruction_path): + + from pathlib import Path + import random + import string + + t = droid.video.counter.value + tstamps = droid.video.tstamp[:t].cpu().numpy() + images = droid.video.images[:t].cpu().numpy() + disps = droid.video.disps_up[:t].cpu().numpy() + poses = droid.video.poses[:t].cpu().numpy() + intrinsics = droid.video.intrinsics[:t].cpu().numpy() + + Path("reconstructions/{}".format(reconstruction_path)).mkdir(parents=True, exist_ok=True) + np.save("reconstructions/{}/tstamps.npy".format(reconstruction_path), tstamps) + np.save("reconstructions/{}/images.npy".format(reconstruction_path), images) + np.save("reconstructions/{}/disps.npy".format(reconstruction_path), disps) + np.save("reconstructions/{}/poses.npy".format(reconstruction_path), poses) + np.save("reconstructions/{}/intrinsics.npy".format(reconstruction_path), intrinsics) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--imagedir", type=str, help="path to image directory") + parser.add_argument("--calib", type=str, help="path to calibration file") + parser.add_argument("--t0", default=0, type=int, help="starting frame") + parser.add_argument("--stride", default=3, type=int, help="frame stride") + + parser.add_argument("--weights", default="droid.pth") + parser.add_argument("--buffer", type=int, default=512) + parser.add_argument("--image_size", default=[240, 320]) + parser.add_argument("--disable_vis", action="store_true") + + parser.add_argument("--beta", type=float, default=0.3, help="weight for translation / rotation components of flow") + parser.add_argument("--filter_thresh", type=float, default=2.4, help="how much motion before considering new keyframe") + parser.add_argument("--warmup", type=int, default=8, help="number of warmup frames") + parser.add_argument("--keyframe_thresh", type=float, default=4.0, help="threshold to create a new keyframe") + 
parser.add_argument("--frontend_thresh", type=float, default=16.0, help="add edges between frames whithin this distance") + parser.add_argument("--frontend_window", type=int, default=25, help="frontend optimization window") + parser.add_argument("--frontend_radius", type=int, default=2, help="force edges between frames within radius") + parser.add_argument("--frontend_nms", type=int, default=1, help="non-maximal supression of edges") + + parser.add_argument("--backend_thresh", type=float, default=22.0) + parser.add_argument("--backend_radius", type=int, default=2) + parser.add_argument("--backend_nms", type=int, default=3) + parser.add_argument("--upsample", action="store_true") + parser.add_argument("--reconstruction_path", help="path to saved reconstruction") + args = parser.parse_args() + + args.stereo = False + torch.multiprocessing.set_start_method('spawn') + + droid = None + + # need high resolution depths + if args.reconstruction_path is not None: + args.upsample = True + + tstamps = [] + for (t, image, intrinsics) in tqdm(image_stream(args.imagedir, args.calib, args.stride)): + if t < args.t0: + continue + + if not args.disable_vis: + show_image(image[0]) + + if droid is None: + args.image_size = [image.shape[2], image.shape[3]] + droid = Droid(args) + + droid.track(t, image, intrinsics=intrinsics) + + if args.reconstruction_path is not None: + save_reconstruction(droid, args.reconstruction_path) + + traj_est = droid.terminate(image_stream(args.imagedir, args.calib, args.stride)) diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/__init__.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/__init__.py @@ -0,0 +1 @@ + diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/augmentation.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..ba5341457c2ca025605f3f2a60821016aba0e8fd --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/augmentation.py @@ -0,0 +1,58 @@ +import torch +import torchvision.transforms as transforms +import numpy as np +import torch.nn.functional as F + + +class RGBDAugmentor: + """ perform augmentation on RGB-D video """ + + def __init__(self, crop_size): + self.crop_size = crop_size + self.augcolor = transforms.Compose([ + transforms.ToPILImage(), + transforms.ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.4/3.14), + transforms.RandomGrayscale(p=0.1), + transforms.ToTensor()]) + + self.max_scale = 0.25 + + def spatial_transform(self, images, depths, poses, intrinsics): + """ cropping and resizing """ + ht, wd = images.shape[2:] + + max_scale = self.max_scale + min_scale = np.log2(np.maximum( + (self.crop_size[0] + 1) / float(ht), + (self.crop_size[1] + 1) / float(wd))) + + scale = 2 ** np.random.uniform(min_scale, max_scale) + intrinsics = scale * intrinsics + depths = depths.unsqueeze(dim=1) + + images = F.interpolate(images, scale_factor=scale, mode='bilinear', + align_corners=False, recompute_scale_factor=True) + + depths = F.interpolate(depths, scale_factor=scale, recompute_scale_factor=True) + + # always perform center crop (TODO: try non-center crops) + y0 = (images.shape[2] - self.crop_size[0]) // 2 + x0 = (images.shape[3] - self.crop_size[1]) // 2 + + intrinsics = intrinsics - torch.tensor([0.0, 0.0, x0, y0]) + images = images[:, :, 
y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + depths = depths[:, :, y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + + depths = depths.squeeze(dim=1) + return images, poses, depths, intrinsics + + def color_transform(self, images): + """ color jittering """ + num, ch, ht, wd = images.shape + images = images.permute(1, 2, 3, 0).reshape(ch, ht, wd*num) + images = 255 * self.augcolor(images[[2,1,0]] / 255.0) + return images[[2,1,0]].reshape(ch, ht, wd, num).permute(3,0,1,2).contiguous() + + def __call__(self, images, poses, depths, intrinsics): + images = self.color_transform(images) + return self.spatial_transform(images, depths, poses, intrinsics) diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/base.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..f676697b8a94ff9f5b5d2b0eaf618c64411bec1d --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/base.py @@ -0,0 +1,157 @@ + +import numpy as np +import torch +import torch.utils.data as data +import torch.nn.functional as F + +import csv +import os +import cv2 +import math +import random +import json +import pickle +import os.path as osp + +from .augmentation import RGBDAugmentor +from .rgbd_utils import * + +class RGBDDataset(data.Dataset): + def __init__(self, name, datapath, n_frames=4, crop_size=[384,512], fmin=8.0, fmax=75.0, do_aug=True): + """ Base class for RGBD dataset """ + self.aug = None + self.root = datapath + self.name = name + + self.n_frames = n_frames + self.fmin = fmin # exclude very easy examples + self.fmax = fmax # exclude very hard examples + + if do_aug: + self.aug = RGBDAugmentor(crop_size=crop_size) + + # building dataset is expensive, cache so only needs to be performed once + cur_path = osp.dirname(osp.abspath(__file__)) + if not os.path.isdir(osp.join(cur_path, 'cache')): + os.mkdir(osp.join(cur_path, 'cache')) + + cache_path = osp.join(cur_path, 'cache', '{}.pickle'.format(self.name)) + + if osp.isfile(cache_path): + scene_info = pickle.load(open(cache_path, 'rb'))[0] + else: + scene_info = self._build_dataset() + with open(cache_path, 'wb') as cachefile: + pickle.dump((scene_info,), cachefile) + + self.scene_info = scene_info + self._build_dataset_index() + + def _build_dataset_index(self): + self.dataset_index = [] + for scene in self.scene_info: + if not self.__class__.is_test_scene(scene): + graph = self.scene_info[scene]['graph'] + for i in graph: + if len(graph[i][0]) > self.n_frames: + self.dataset_index.append((scene, i)) + else: + print("Reserving {} for validation".format(scene)) + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) + + @staticmethod + def depth_read(depth_file): + return np.load(depth_file) + + def build_frame_graph(self, poses, depths, intrinsics, f=16, max_flow=256): + """ compute optical flow distance between all pairs of frames """ + def read_disp(fn): + depth = self.__class__.depth_read(fn)[f//2::f, f//2::f] + depth[depth < 0.01] = np.mean(depth) + return 1.0 / depth + + poses = np.array(poses) + intrinsics = np.array(intrinsics) / f + + disps = np.stack(list(map(read_disp, depths)), 0) + d = f * compute_distance_matrix_flow(poses, disps, intrinsics) + + # uncomment for nice visualization + # import matplotlib.pyplot as plt + # plt.imshow(d) + # plt.show() + + graph = {} + for i in range(d.shape[0]): + j, = np.where(d[i] < max_flow) + graph[i] = (j, d[i,j]) + + return graph + + def __getitem__(self, index): + """ return training video """ + + index = 
index % len(self.dataset_index) + scene_id, ix = self.dataset_index[index] + + frame_graph = self.scene_info[scene_id]['graph'] + images_list = self.scene_info[scene_id]['images'] + depths_list = self.scene_info[scene_id]['depths'] + poses_list = self.scene_info[scene_id]['poses'] + intrinsics_list = self.scene_info[scene_id]['intrinsics'] + + inds = [ ix ] + while len(inds) < self.n_frames: + # get other frames within flow threshold + k = (frame_graph[ix][1] > self.fmin) & (frame_graph[ix][1] < self.fmax) + frames = frame_graph[ix][0][k] + + # prefer frames forward in time + if np.count_nonzero(frames[frames > ix]): + ix = np.random.choice(frames[frames > ix]) + + elif np.count_nonzero(frames): + ix = np.random.choice(frames) + + inds += [ ix ] + + images, depths, poses, intrinsics = [], [], [], [] + for i in inds: + images.append(self.__class__.image_read(images_list[i])) + depths.append(self.__class__.depth_read(depths_list[i])) + poses.append(poses_list[i]) + intrinsics.append(intrinsics_list[i]) + + images = np.stack(images).astype(np.float32) + depths = np.stack(depths).astype(np.float32) + poses = np.stack(poses).astype(np.float32) + intrinsics = np.stack(intrinsics).astype(np.float32) + + images = torch.from_numpy(images).float() + images = images.permute(0, 3, 1, 2) + + disps = torch.from_numpy(1.0 / depths) + poses = torch.from_numpy(poses) + intrinsics = torch.from_numpy(intrinsics) + + if self.aug is not None: + images, poses, disps, intrinsics = \ + self.aug(images, poses, disps, intrinsics) + + # scale scene + if len(disps[disps>0.01]) > 0: + s = disps[disps>0.01].mean() + disps = disps / s + poses[...,:3] *= s + + return images, poses, disps, intrinsics + + def __len__(self): + return len(self.dataset_index) + + def __imul__(self, x): + self.dataset_index *= x + return self diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/factory.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..830dca09d712c626c80b734713b1efe19bf7656e --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/factory.py @@ -0,0 +1,82 @@ + +import pickle +import os +import os.path as osp + +# RGBD-Dataset +from .tartan import TartanAir + +from .stream import ImageStream +from .stream import StereoStream +from .stream import RGBDStream + +# streaming datasets for inference +from .tartan import TartanAirStream +from .tartan import TartanAirTestStream + +def dataset_factory(dataset_list, **kwargs): + """ create a combined dataset """ + + from torch.utils.data import ConcatDataset + + dataset_map = { 'tartan': (TartanAir, ) } + db_list = [] + for key in dataset_list: + # cache datasets for faster future loading + db = dataset_map[key][0](**kwargs) + + print("Dataset {} has {} images".format(key, len(db))) + db_list.append(db) + + return ConcatDataset(db_list) + + +def create_datastream(dataset_path, **kwargs): + """ create data_loader to stream images 1 by 1 """ + + from torch.utils.data import DataLoader + + if osp.isfile(osp.join(dataset_path, 'calibration.txt')): + db = ETH3DStream(dataset_path, **kwargs) + + elif osp.isdir(osp.join(dataset_path, 'image_left')): + db = TartanAirStream(dataset_path, **kwargs) + + elif osp.isfile(osp.join(dataset_path, 'rgb.txt')): + db = TUMStream(dataset_path, **kwargs) + + elif osp.isdir(osp.join(dataset_path, 'mav0')): + db = EurocStream(dataset_path, **kwargs) + + elif osp.isfile(osp.join(dataset_path, 'calib.txt')): + db = KITTIStream(dataset_path, **kwargs) + + else: + # db 
= TartanAirStream(dataset_path, **kwargs) + db = TartanAirTestStream(dataset_path, **kwargs) + + stream = DataLoader(db, shuffle=False, batch_size=1, num_workers=4) + return stream + + +def create_imagestream(dataset_path, **kwargs): + """ create data_loader to stream images 1 by 1 """ + from torch.utils.data import DataLoader + + db = ImageStream(dataset_path, **kwargs) + return DataLoader(db, shuffle=False, batch_size=1, num_workers=4) + +def create_stereostream(dataset_path, **kwargs): + """ create data_loader to stream images 1 by 1 """ + from torch.utils.data import DataLoader + + db = StereoStream(dataset_path, **kwargs) + return DataLoader(db, shuffle=False, batch_size=1, num_workers=4) + +def create_rgbdstream(dataset_path, **kwargs): + """ create data_loader to stream images 1 by 1 """ + from torch.utils.data import DataLoader + + db = RGBDStream(dataset_path, **kwargs) + return DataLoader(db, shuffle=False, batch_size=1, num_workers=4) + diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/rgbd_utils.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/rgbd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..678cc043808f7e9c017190c7d54dcdfd941d9228 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/rgbd_utils.py @@ -0,0 +1,190 @@ +import numpy as np +import os.path as osp + +import torch +from lietorch import SE3 + +import geom.projective_ops as pops +from scipy.spatial.transform import Rotation + + +def parse_list(filepath, skiprows=0): + """ read list data """ + data = np.loadtxt(filepath, delimiter=' ', dtype=np.unicode_, skiprows=skiprows) + return data + +def associate_frames(tstamp_image, tstamp_depth, tstamp_pose, max_dt=1.0): + """ pair images, depths, and poses """ + associations = [] + for i, t in enumerate(tstamp_image): + if tstamp_pose is None: + j = np.argmin(np.abs(tstamp_depth - t)) + if (np.abs(tstamp_depth[j] - t) < max_dt): + associations.append((i, j)) + + else: + j = np.argmin(np.abs(tstamp_depth - t)) + k = np.argmin(np.abs(tstamp_pose - t)) + + if (np.abs(tstamp_depth[j] - t) < max_dt) and \ + (np.abs(tstamp_pose[k] - t) < max_dt): + associations.append((i, j, k)) + + return associations + +def loadtum(datapath, frame_rate=-1): + """ read video data in tum-rgbd format """ + if osp.isfile(osp.join(datapath, 'groundtruth.txt')): + pose_list = osp.join(datapath, 'groundtruth.txt') + + elif osp.isfile(osp.join(datapath, 'pose.txt')): + pose_list = osp.join(datapath, 'pose.txt') + + else: + return None, None, None, None + + image_list = osp.join(datapath, 'rgb.txt') + depth_list = osp.join(datapath, 'depth.txt') + + calib_path = osp.join(datapath, 'calibration.txt') + intrinsic = None + if osp.isfile(calib_path): + intrinsic = np.loadtxt(calib_path, delimiter=' ') + intrinsic = intrinsic.astype(np.float64) + + image_data = parse_list(image_list) + depth_data = parse_list(depth_list) + pose_data = parse_list(pose_list, skiprows=1) + pose_vecs = pose_data[:,1:].astype(np.float64) + + tstamp_image = image_data[:,0].astype(np.float64) + tstamp_depth = depth_data[:,0].astype(np.float64) + tstamp_pose = pose_data[:,0].astype(np.float64) + associations = associate_frames(tstamp_image, tstamp_depth, tstamp_pose) + + # print(len(tstamp_image)) + # print(len(associations)) + + indicies = range(len(associations))[::5] + + # indicies = [ 0 ] + # for i in range(1, len(associations)): + # t0 = tstamp_image[associations[indicies[-1]][0]] + # t1 = tstamp_image[associations[i][0]] + # if t1 - t0 > 1.0 / frame_rate: + # indicies += [ i 
] + + images, poses, depths, intrinsics, tstamps = [], [], [], [], [] + for ix in indicies: + (i, j, k) = associations[ix] + images += [ osp.join(datapath, image_data[i,1]) ] + depths += [ osp.join(datapath, depth_data[j,1]) ] + poses += [ pose_vecs[k] ] + tstamps += [ tstamp_image[i] ] + + if intrinsic is not None: + intrinsics += [ intrinsic ] + + return images, depths, poses, intrinsics, tstamps + + +def all_pairs_distance_matrix(poses, beta=2.5): + """ compute distance matrix between all pairs of poses """ + poses = np.array(poses, dtype=np.float32) + poses[:,:3] *= beta # scale to balence rot + trans + poses = SE3(torch.from_numpy(poses)) + + r = (poses[:,None].inv() * poses[None,:]).log() + return r.norm(dim=-1).cpu().numpy() + +def pose_matrix_to_quaternion(pose): + """ convert 4x4 pose matrix to (t, q) """ + q = Rotation.from_matrix(pose[:3, :3]).as_quat() + return np.concatenate([pose[:3, 3], q], axis=0) + +def compute_distance_matrix_flow(poses, disps, intrinsics): + """ compute flow magnitude between all pairs of frames """ + if not isinstance(poses, SE3): + poses = torch.from_numpy(poses).float().cuda()[None] + poses = SE3(poses).inv() + + disps = torch.from_numpy(disps).float().cuda()[None] + intrinsics = torch.from_numpy(intrinsics).float().cuda()[None] + + N = poses.shape[1] + + ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N), indexing='ij') + ii = ii.reshape(-1).cuda() + jj = jj.reshape(-1).cuda() + + MAX_FLOW = 100.0 + matrix = np.zeros((N, N), dtype=np.float32) + + s = 2048 + for i in range(0, ii.shape[0], s): + flow1, val1 = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s]) + flow2, val2 = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s]) + + flow = torch.stack([flow1, flow2], dim=2) + val = torch.stack([val1, val2], dim=2) + + mag = flow.norm(dim=-1).clamp(max=MAX_FLOW) + mag = mag.view(mag.shape[1], -1) + val = val.view(val.shape[1], -1) + + mag = (mag * val).mean(-1) / val.mean(-1) + mag[val.mean(-1) < 0.7] = np.inf + + i1 = ii[i:i+s].cpu().numpy() + j1 = jj[i:i+s].cpu().numpy() + matrix[i1, j1] = mag.cpu().numpy() + + return matrix + + +def compute_distance_matrix_flow2(poses, disps, intrinsics, beta=0.4): + """ compute flow magnitude between all pairs of frames """ + # if not isinstance(poses, SE3): + # poses = torch.from_numpy(poses).float().cuda()[None] + # poses = SE3(poses).inv() + + # disps = torch.from_numpy(disps).float().cuda()[None] + # intrinsics = torch.from_numpy(intrinsics).float().cuda()[None] + + N = poses.shape[1] + + ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N), indexing='ij') + ii = ii.reshape(-1) + jj = jj.reshape(-1) + + MAX_FLOW = 128.0 + matrix = np.zeros((N, N), dtype=np.float32) + + s = 2048 + for i in range(0, ii.shape[0], s): + flow1a, val1a = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s], tonly=True) + flow1b, val1b = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s]) + flow2a, val2a = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s], tonly=True) + flow2b, val2b = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s]) + + flow1 = flow1a + beta * flow1b + val1 = val1a * val2b + + flow2 = flow2a + beta * flow2b + val2 = val2a * val2b + + flow = torch.stack([flow1, flow2], dim=2) + val = torch.stack([val1, val2], dim=2) + + mag = flow.norm(dim=-1).clamp(max=MAX_FLOW) + mag = mag.view(mag.shape[1], -1) + val = val.view(val.shape[1], -1) + + mag = (mag * val).mean(-1) / val.mean(-1) + mag[val.mean(-1) < 0.8] = np.inf + + i1 = 
ii[i:i+s].cpu().numpy() + j1 = jj[i:i+s].cpu().numpy() + matrix[i1, j1] = mag.cpu().numpy() + + return matrix diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/stream.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..1b17d706f6f7edb39c1ce8c806c09011375e0255 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/stream.py @@ -0,0 +1,234 @@ + +import numpy as np +import torch +import torch.utils.data as data +import torch.nn.functional as F + +import csv +import os +import cv2 +import math +import random +import json +import pickle +import os.path as osp + +from .rgbd_utils import * + +class RGBDStream(data.Dataset): + def __init__(self, datapath, frame_rate=-1, image_size=[384,512], crop_size=[0,0]): + self.datapath = datapath + self.frame_rate = frame_rate + self.image_size = image_size + self.crop_size = crop_size + self._build_dataset_index() + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) + + @staticmethod + def depth_read(depth_file): + return np.load(depth_file) + + def __len__(self): + return len(self.images) + + def __getitem__(self, index): + """ return training video """ + image = self.__class__.image_read(self.images[index]) + image = torch.from_numpy(image).float() + image = image.permute(2, 0, 1) + + try: + tstamp = self.tstamps[index] + except: + tstamp = index + + pose = torch.from_numpy(self.poses[index]).float() + intrinsic = torch.from_numpy(self.intrinsics[index]).float() + + # resize image + sx = self.image_size[1] / image.shape[2] + sy = self.image_size[0] / image.shape[1] + + image = F.interpolate(image[None], self.image_size, mode='bilinear', align_corners=False)[0] + + fx, fy, cx, cy = intrinsic.unbind(dim=0) + fx, cx = sx * fx, sx * cx + fy, cy = sy * fy, sy * cy + + # crop image + if self.crop_size[0] > 0: + cy = cy - self.crop_size[0] + image = image[:,self.crop_size[0]:-self.crop_size[0],:] + + if self.crop_size[1] > 0: + cx = cx - self.crop_size[1] + image = image[:,:,self.crop_size[1]:-self.crop_size[1]] + + intrinsic = torch.stack([fx, fy, cx, cy]) + + return tstamp, image, pose, intrinsic + + +class ImageStream(data.Dataset): + def __init__(self, datapath, intrinsics, rate=1, image_size=[384,512]): + rgb_list = osp.join(datapath, 'rgb.txt') + if os.path.isfile(rgb_list): + rgb_list = np.loadtxt(rgb_list, delimiter=' ', dtype=np.unicode_) + self.timestamps = rgb_list[:,0].astype(np.float) + self.images = [os.path.join(datapath, x) for x in rgb_list[:,1]] + self.images = self.images[::rate] + self.timestamps = self.timestamps[::rate] + + else: + import glob + self.images = sorted(glob.glob(osp.join(datapath, '*.jpg'))) + sorted(glob.glob(osp.join(datapath, '*.png'))) + self.images = self.images[::rate] + + self.intrinsics = intrinsics + self.image_size = image_size + + def __len__(self): + return len(self.images) + + @staticmethod + def image_read(imfile): + return cv2.imread(imfile) + + def __getitem__(self, index): + """ return training video """ + image = self.__class__.image_read(self.images[index]) + + try: + tstamp = self.timestamps[index] + except: + tstamp = index + + ht0, wd0 = image.shape[:2] + ht1, wd1 = self.image_size + + intrinsics = torch.as_tensor(self.intrinsics) + intrinsics[0] *= wd1 / wd0 + intrinsics[1] *= ht1 / ht0 + intrinsics[2] *= wd1 / wd0 + intrinsics[3] *= ht1 / ht0 + + # resize image + ikwargs = {'mode': 'bilinear', 'align_corners': True} + image = torch.from_numpy(image).float().permute(2, 0, 1) + 
image = F.interpolate(image[None], self.image_size, **ikwargs)[0] + + return tstamp, image, intrinsics + + + +class StereoStream(data.Dataset): + def __init__(self, datapath, intrinsics, rate=1, image_size=[384,512], + map_left=None, map_right=None, left_root='image_left', right_root='image_right'): + import glob + self.intrinsics = intrinsics + self.image_size = image_size + + imgs = sorted(glob.glob(osp.join(datapath, left_root, '*.png')))[::rate] + self.images_l = [] + self.images_r = [] + self.tstamps = [] + + for img_l in imgs: + img_r = img_l.replace(left_root, right_root) + if os.path.isfile(img_r): + t = np.float(img_l.split('/')[-1].replace('.png', '')) + self.tstamps.append(t) + self.images_l += [ img_l ] + self.images_r += [ img_r ] + + self.map_left = map_left + self.map_right = map_right + + def __len__(self): + return len(self.images_l) + + @staticmethod + def image_read(imfile, imap=None): + image = cv2.imread(imfile) + if imap is not None: + image = cv2.remap(image, imap[0], imap[1], interpolation=cv2.INTER_LINEAR) + return image + + def __getitem__(self, index): + """ return training video """ + tstamp = self.tstamps[index] + image_l = self.__class__.image_read(self.images_l[index], self.map_left) + image_r = self.__class__.image_read(self.images_r[index], self.map_right) + + ht0, wd0 = image_l.shape[:2] + ht1, wd1 = self.image_size + + intrinsics = torch.as_tensor(self.intrinsics) + intrinsics[0] *= wd1 / wd0 + intrinsics[1] *= ht1 / ht0 + intrinsics[2] *= wd1 / wd0 + intrinsics[3] *= ht1 / ht0 + + image_l = torch.from_numpy(image_l).float().permute(2, 0, 1) + image_r = torch.from_numpy(image_r).float().permute(2, 0, 1) + + # resize image + ikwargs = {'mode': 'bilinear', 'align_corners': True} + image_l = F.interpolate(image_l[None], self.image_size, **ikwargs)[0] + image_r = F.interpolate(image_r[None], self.image_size, **ikwargs)[0] + + return tstamp, image_l, image_r, intrinsics + + + +# class RGBDStream(data.Dataset): +# def __init__(self, datapath, intrinsics=None, rate=1, image_size=[384,512]): +# assoc_file = osp.join(datapath, 'associated.txt') +# assoc_list = np.loadtxt(assoc_file, delimiter=' ', dtype=np.unicode_) + +# self.intrinsics = intrinsics +# self.image_size = image_size + +# self.timestamps = assoc_list[:,0].astype(np.float)[::rate] +# self.images = [os.path.join(datapath, x) for x in assoc_list[:,1]][::rate] +# self.depths = [os.path.join(datapath, x) for x in assoc_list[:,3]][::rate] + +# def __len__(self): +# return len(self.images) + +# @staticmethod +# def image_read(imfile): +# return cv2.imread(imfile) + +# @staticmethod +# def depth_read(depth_file): +# depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH) +# return depth.astype(np.float32) / 5000.0 + +# def __getitem__(self, index): +# """ return training video """ +# tstamp = self.timestamps[index] +# image = self.__class__.image_read(self.images[index]) +# depth = self.__class__.depth_read(self.depths[index]) + +# ht0, wd0 = image.shape[:2] +# ht1, wd1 = self.image_size + +# intrinsics = torch.as_tensor(self.intrinsics) +# intrinsics[0] *= wd1 / wd0 +# intrinsics[1] *= ht1 / ht0 +# intrinsics[2] *= wd1 / wd0 +# intrinsics[3] *= ht1 / ht0 + +# # resize image +# ikwargs = {'mode': 'bilinear', 'align_corners': True} +# image = torch.from_numpy(image).float().permute(2, 0, 1) +# image = F.interpolate(image[None], self.image_size, **ikwargs)[0] + +# depth = torch.from_numpy(depth).float()[None,None] +# depth = F.interpolate(depth, self.image_size, mode='nearest').squeeze() + +# return tstamp, image, 
depth, intrinsics diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/tartan.py b/thirdparty/DROID-SLAM/droid_slam/data_readers/tartan.py new file mode 100644 index 0000000000000000000000000000000000000000..49780ca91919e83cae8513dba7f9f9702b65b65a --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/tartan.py @@ -0,0 +1,138 @@ + +import numpy as np +import torch +import glob +import cv2 +import os +import os.path as osp + +from lietorch import SE3 +from .base import RGBDDataset +from .stream import RGBDStream + +cur_path = osp.dirname(osp.abspath(__file__)) +test_split = osp.join(cur_path, 'tartan_test.txt') +test_split = open(test_split).read().split() + + +class TartanAir(RGBDDataset): + + # scale depths to balance rot & trans + DEPTH_SCALE = 5.0 + + def __init__(self, mode='training', **kwargs): + self.mode = mode + self.n_frames = 2 + super(TartanAir, self).__init__(name='TartanAir', **kwargs) + + @staticmethod + def is_test_scene(scene): + # print(scene, any(x in scene for x in test_split)) + return any(x in scene for x in test_split) + + def _build_dataset(self): + from tqdm import tqdm + print("Building TartanAir dataset") + + scene_info = {} + scenes = glob.glob(osp.join(self.root, '*/*/*/*')) + for scene in tqdm(sorted(scenes)): + images = sorted(glob.glob(osp.join(scene, 'image_left/*.png'))) + depths = sorted(glob.glob(osp.join(scene, 'depth_left/*.npy'))) + + poses = np.loadtxt(osp.join(scene, 'pose_left.txt'), delimiter=' ') + poses = poses[:, [1, 2, 0, 4, 5, 3, 6]] + poses[:,:3] /= TartanAir.DEPTH_SCALE + intrinsics = [TartanAir.calib_read()] * len(images) + + # graph of co-visible frames based on flow + graph = self.build_frame_graph(poses, depths, intrinsics) + + scene = '/'.join(scene.split('/')) + scene_info[scene] = {'images': images, 'depths': depths, + 'poses': poses, 'intrinsics': intrinsics, 'graph': graph} + + return scene_info + + @staticmethod + def calib_read(): + return np.array([320.0, 320.0, 320.0, 240.0]) + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) + + @staticmethod + def depth_read(depth_file): + depth = np.load(depth_file) / TartanAir.DEPTH_SCALE + depth[depth==np.nan] = 1.0 + depth[depth==np.inf] = 1.0 + return depth + + +class TartanAirStream(RGBDStream): + def __init__(self, datapath, **kwargs): + super(TartanAirStream, self).__init__(datapath=datapath, **kwargs) + + def _build_dataset_index(self): + """ build list of images, poses, depths, and intrinsics """ + self.root = 'datasets/TartanAir' + + scene = osp.join(self.root, self.datapath) + image_glob = osp.join(scene, 'image_left/*.png') + images = sorted(glob.glob(image_glob)) + + poses = np.loadtxt(osp.join(scene, 'pose_left.txt'), delimiter=' ') + poses = poses[:, [1, 2, 0, 4, 5, 3, 6]] + + poses = SE3(torch.as_tensor(poses)) + poses = poses[[0]].inv() * poses + poses = poses.data.cpu().numpy() + + intrinsic = self.calib_read(self.datapath) + intrinsics = np.tile(intrinsic[None], (len(images), 1)) + + self.images = images[::int(self.frame_rate)] + self.poses = poses[::int(self.frame_rate)] + self.intrinsics = intrinsics[::int(self.frame_rate)] + + @staticmethod + def calib_read(datapath): + return np.array([320.0, 320.0, 320.0, 240.0]) + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) + + +class TartanAirTestStream(RGBDStream): + def __init__(self, datapath, **kwargs): + super(TartanAirTestStream, self).__init__(datapath=datapath, **kwargs) + + def _build_dataset_index(self): + """ build list of images, poses, 
depths, and intrinsics """ + self.root = 'datasets/mono' + image_glob = osp.join(self.root, self.datapath, '*.png') + images = sorted(glob.glob(image_glob)) + + poses = np.loadtxt(osp.join(self.root, 'mono_gt', self.datapath + '.txt'), delimiter=' ') + poses = poses[:, [1, 2, 0, 4, 5, 3, 6]] + + poses = SE3(torch.as_tensor(poses)) + poses = poses[[0]].inv() * poses + poses = poses.data.cpu().numpy() + + intrinsic = self.calib_read(self.datapath) + intrinsics = np.tile(intrinsic[None], (len(images), 1)) + + self.images = images[::int(self.frame_rate)] + self.poses = poses[::int(self.frame_rate)] + self.intrinsics = intrinsics[::int(self.frame_rate)] + + @staticmethod + def calib_read(datapath): + return np.array([320.0, 320.0, 320.0, 240.0]) + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/droid_slam/data_readers/tartan_test.txt b/thirdparty/DROID-SLAM/droid_slam/data_readers/tartan_test.txt new file mode 100644 index 0000000000000000000000000000000000000000..d52d88da87b3b2096624162da3872116eefb6003 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/data_readers/tartan_test.txt @@ -0,0 +1,32 @@ +abandonedfactory/abandonedfactory/Easy/P011 +abandonedfactory/abandonedfactory/Hard/P011 +abandonedfactory_night/abandonedfactory_night/Easy/P013 +abandonedfactory_night/abandonedfactory_night/Hard/P014 +amusement/amusement/Easy/P008 +amusement/amusement/Hard/P007 +carwelding/carwelding/Easy/P007 +endofworld/endofworld/Easy/P009 +gascola/gascola/Easy/P008 +gascola/gascola/Hard/P009 +hospital/hospital/Easy/P036 +hospital/hospital/Hard/P049 +japanesealley/japanesealley/Easy/P007 +japanesealley/japanesealley/Hard/P005 +neighborhood/neighborhood/Easy/P021 +neighborhood/neighborhood/Hard/P017 +ocean/ocean/Easy/P013 +ocean/ocean/Hard/P009 +office2/office2/Easy/P011 +office2/office2/Hard/P010 +office/office/Hard/P007 +oldtown/oldtown/Easy/P007 +oldtown/oldtown/Hard/P008 +seasidetown/seasidetown/Easy/P009 +seasonsforest/seasonsforest/Easy/P011 +seasonsforest/seasonsforest/Hard/P006 +seasonsforest_winter/seasonsforest_winter/Easy/P009 +seasonsforest_winter/seasonsforest_winter/Hard/P018 +soulcity/soulcity/Easy/P012 +soulcity/soulcity/Hard/P009 +westerndesert/westerndesert/Easy/P013 +westerndesert/westerndesert/Hard/P007 diff --git a/thirdparty/DROID-SLAM/droid_slam/depth_video.py b/thirdparty/DROID-SLAM/droid_slam/depth_video.py new file mode 100644 index 0000000000000000000000000000000000000000..a23e262f6d9404307fde5b134735b8a87f3d3741 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/depth_video.py @@ -0,0 +1,197 @@ +import numpy as np +import torch +import lietorch +import droid_backends + +from torch.multiprocessing import Process, Queue, Lock, Value +from collections import OrderedDict + +from droid_net import cvx_upsample +import geom.projective_ops as pops + +class DepthVideo: + def __init__(self, image_size=[480, 640], buffer=1024, stereo=False, device="cuda:0"): + + # current keyframe count + self.counter = Value('i', 0) + self.ready = Value('i', 0) + self.ht = ht = image_size[0] + self.wd = wd = image_size[1] + + ### state attributes ### + self.tstamp = torch.zeros(buffer, device="cuda", dtype=torch.float).share_memory_() + self.images = torch.zeros(buffer, 3, ht, wd, device="cuda", dtype=torch.uint8) + self.dirty = torch.zeros(buffer, device="cuda", dtype=torch.bool).share_memory_() + self.red = torch.zeros(buffer, device="cuda", dtype=torch.bool).share_memory_() + self.poses = torch.zeros(buffer, 7, 
device="cuda", dtype=torch.float).share_memory_() + self.disps = torch.ones(buffer, ht//8, wd//8, device="cuda", dtype=torch.float).share_memory_() + self.disps_sens = torch.zeros(buffer, ht//8, wd//8, device="cuda", dtype=torch.float).share_memory_() + self.disps_up = torch.zeros(buffer, ht, wd, device="cuda", dtype=torch.float).share_memory_() + self.intrinsics = torch.zeros(buffer, 4, device="cuda", dtype=torch.float).share_memory_() + + self.masks = torch.zeros(buffer, ht//8, wd//8, device="cuda", dtype=torch.float).share_memory_() + self.stereo = stereo + c = 1 if not self.stereo else 2 + + ### feature attributes ### + self.fmaps = torch.zeros(buffer, c, 128, ht//8, wd//8, dtype=torch.half, device="cuda").share_memory_() + self.nets = torch.zeros(buffer, 128, ht//8, wd//8, dtype=torch.half, device="cuda").share_memory_() + self.inps = torch.zeros(buffer, 128, ht//8, wd//8, dtype=torch.half, device="cuda").share_memory_() + + # initialize poses to identity transformation + self.poses[:] = torch.as_tensor([0, 0, 0, 0, 0, 0, 1], dtype=torch.float, device="cuda") + + def get_lock(self): + return self.counter.get_lock() + + def __item_setter(self, index, item): + if isinstance(index, int) and index >= self.counter.value: + self.counter.value = index + 1 + + elif isinstance(index, torch.Tensor) and index.max().item() > self.counter.value: + self.counter.value = index.max().item() + 1 + + # self.dirty[index] = True + self.tstamp[index] = item[0] + self.images[index] = item[1] + + if item[2] is not None: + self.poses[index] = item[2] + + if item[3] is not None: + self.disps[index] = item[3] + + if item[4] is not None: + depth = item[4][3::8,3::8] + self.disps_sens[index] = torch.where(depth>0, 1.0/depth, depth) + + if item[5] is not None: + self.intrinsics[index] = item[5] + + if len(item) > 6: + self.fmaps[index] = item[6] + + if len(item) > 7: + self.nets[index] = item[7] + + if len(item) > 8: + self.inps[index] = item[8] + + if len(item) > 9: + self.masks[index] = item[9] + + def __setitem__(self, index, item): + with self.get_lock(): + self.__item_setter(index, item) + + def __getitem__(self, index): + """ index the depth video """ + + with self.get_lock(): + # support negative indexing + if isinstance(index, int) and index < 0: + index = self.counter.value + index + + item = ( + self.poses[index], + self.disps[index], + self.intrinsics[index], + self.fmaps[index], + self.nets[index], + self.inps[index]) + + return item + + def append(self, *item): + with self.get_lock(): + self.__item_setter(self.counter.value, item) + + + ### geometric operations ### + + @staticmethod + def format_indicies(ii, jj): + """ to device, long, {-1} """ + + if not isinstance(ii, torch.Tensor): + ii = torch.as_tensor(ii) + + if not isinstance(jj, torch.Tensor): + jj = torch.as_tensor(jj) + + ii = ii.to(device="cuda", dtype=torch.long).reshape(-1) + jj = jj.to(device="cuda", dtype=torch.long).reshape(-1) + + return ii, jj + + def upsample(self, ix, mask): + """ upsample disparity """ + + disps_up = cvx_upsample(self.disps[ix].unsqueeze(-1), mask) + self.disps_up[ix] = disps_up.squeeze() + + def normalize(self): + """ normalize depth and poses """ + + with self.get_lock(): + s = self.disps[:self.counter.value].mean() + self.disps[:self.counter.value] /= s + self.poses[:self.counter.value,:3] *= s + self.dirty[:self.counter.value] = True + + + def reproject(self, ii, jj): + """ project points from ii -> jj """ + ii, jj = DepthVideo.format_indicies(ii, jj) + Gs = lietorch.SE3(self.poses[None]) + + coords, 
valid_mask = \ + pops.projective_transform(Gs, self.disps[None], self.intrinsics[None], ii, jj) + + return coords, valid_mask + + def distance(self, ii=None, jj=None, beta=0.3, bidirectional=True): + """ frame distance metric """ + + return_matrix = False + if ii is None: + return_matrix = True + N = self.counter.value + ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N), indexing='ij') + + ii, jj = DepthVideo.format_indicies(ii, jj) + + if bidirectional: + + poses = self.poses[:self.counter.value].clone() + + d1 = droid_backends.frame_distance( + poses, self.disps, self.intrinsics[0], ii, jj, beta) + + d2 = droid_backends.frame_distance( + poses, self.disps, self.intrinsics[0], jj, ii, beta) + + d = .5 * (d1 + d2) + + else: + d = droid_backends.frame_distance( + self.poses, self.disps, self.intrinsics[0], ii, jj, beta) + + if return_matrix: + return d.reshape(N, N) + + return d + + def ba(self, target, weight, eta, ii, jj, t0=1, t1=None, itrs=2, lm=1e-4, ep=0.1, motion_only=False): + """ dense bundle adjustment (DBA) """ + + with self.get_lock(): + + # [t0, t1] window of bundle adjustment optimization + if t1 is None: + t1 = max(ii.max().item(), jj.max().item()) + 1 + + droid_backends.ba(self.poses, self.disps, self.intrinsics[0], self.disps_sens, + target, weight, eta, ii, jj, t0, t1, itrs, lm, ep, motion_only) + + self.disps.clamp_(min=0.001) diff --git a/thirdparty/DROID-SLAM/droid_slam/droid.py b/thirdparty/DROID-SLAM/droid_slam/droid.py new file mode 100644 index 0000000000000000000000000000000000000000..827e13e03ed31435290867ad061f5459766ae85f --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/droid.py @@ -0,0 +1,102 @@ +import torch +import lietorch +import numpy as np + +from droid_net import DroidNet +from depth_video import DepthVideo +from motion_filter import MotionFilter +from droid_frontend import DroidFrontend +from droid_backend import DroidBackend +from trajectory_filler import PoseTrajectoryFiller + +from collections import OrderedDict +from torch.multiprocessing import Process + + +class Droid: + def __init__(self, args): + super(Droid, self).__init__() + self.load_weights(args.weights) + self.args = args + self.disable_vis = args.disable_vis + + # store images, depth, poses, intrinsics (shared between processes) + self.video = DepthVideo(args.image_size, args.buffer, stereo=args.stereo) + + # filter incoming frames so that there is enough motion + self.filterx = MotionFilter(self.net, self.video, thresh=args.filter_thresh) + + # frontend process + self.frontend = DroidFrontend(self.net, self.video, self.args) + + # backend process + self.backend = DroidBackend(self.net, self.video, self.args) + + # visualizer + if not self.disable_vis: + # from visualization import droid_visualization + from vis_headless import droid_visualization + print('Using headless ...') + self.visualizer = Process(target=droid_visualization, args=(self.video, '.')) + self.visualizer.start() + + # post processor - fill in poses for non-keyframes + self.traj_filler = PoseTrajectoryFiller(self.net, self.video) + + + def load_weights(self, weights): + """ load trained model weights """ + + self.net = DroidNet() + state_dict = OrderedDict([ + (k.replace("module.", ""), v) for (k, v) in torch.load(weights).items()]) + + state_dict["update.weight.2.weight"] = state_dict["update.weight.2.weight"][:2] + state_dict["update.weight.2.bias"] = state_dict["update.weight.2.bias"][:2] + state_dict["update.delta.2.weight"] = state_dict["update.delta.2.weight"][:2] + state_dict["update.delta.2.bias"] = 
state_dict["update.delta.2.bias"][:2] + + self.net.load_state_dict(state_dict) + self.net.to("cuda:0").eval() + + def track(self, tstamp, image, depth=None, intrinsics=None, mask=None): + """ main thread - update map """ + + with torch.no_grad(): + # check there is enough motion + self.filterx.track(tstamp, image, depth, intrinsics, mask) + + # local bundle adjustment + self.frontend() + + # global bundle adjustment + # self.backend() + + def terminate(self, stream=None, backend=True): + """ terminate the visualization process, return poses [t, q] """ + + del self.frontend + + if backend: + torch.cuda.empty_cache() + # print("#" * 32) + self.backend(7) + + torch.cuda.empty_cache() + # print("#" * 32) + self.backend(12) + + camera_trajectory = self.traj_filler(stream) + return camera_trajectory.inv().data.cpu().numpy() + + def compute_error(self): + """ compute slam reprojection error """ + + del self.frontend + + torch.cuda.empty_cache() + self.backend(12) + + return self.backend.errors[-1] + + diff --git a/thirdparty/DROID-SLAM/droid_slam/droid_backend.py b/thirdparty/DROID-SLAM/droid_slam/droid_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..49e9fe0b10c6334c8773200dd529c975c8d2913d --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/droid_backend.py @@ -0,0 +1,52 @@ +import torch +import lietorch +import numpy as np + +from lietorch import SE3 +from factor_graph import FactorGraph + + +class DroidBackend: + def __init__(self, net, video, args): + self.video = video + self.update_op = net.update + + # global optimization window + self.t0 = 0 + self.t1 = 0 + + self.upsample = args.upsample + self.beta = args.beta + self.backend_thresh = args.backend_thresh + self.backend_radius = args.backend_radius + self.backend_nms = args.backend_nms + self.errors = [] + + @torch.no_grad() + def __call__(self, steps=12): + """ main update """ + + t = self.video.counter.value + if not self.video.stereo and not torch.any(self.video.disps_sens): + self.video.normalize() + + graph = FactorGraph(self.video, self.update_op, corr_impl="alt", max_factors=16*t, upsample=self.upsample) + + graph.add_proximity_factors(rad=self.backend_radius, + nms=self.backend_nms, + thresh=self.backend_thresh, + beta=self.beta) + + graph.update_lowmem(steps=steps) + self.errors.append(self.cal_err(graph)) + graph.clear_edges() + self.video.dirty[:t] = True + + return + + def cal_err(self, graph): + coord, _ = graph.video.reproject(graph.ii, graph.jj) + diff = graph.target - coord + err = diff.norm(dim=-1).mean().item() + return err + diff --git a/thirdparty/DROID-SLAM/droid_slam/droid_frontend.py b/thirdparty/DROID-SLAM/droid_slam/droid_frontend.py new file mode 100644 index 0000000000000000000000000000000000000000..5d2c56d616486dc35e41356c85aba687cd0acd4a --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/droid_frontend.py @@ -0,0 +1,119 @@ +import torch +import lietorch +import numpy as np + +from lietorch import SE3 +from factor_graph import FactorGraph + + +class DroidFrontend: + def __init__(self, net, video, args): + self.video = video + self.update_op = net.update + self.graph = FactorGraph(video, net.update, max_factors=48, upsample=args.upsample) + + # local optimization window + self.t0 = 0 + self.t1 = 0 + + # frontent variables + self.is_initialized = False + self.count = 0 + + self.max_age = 25 + self.iters1 = 4 + self.iters2 = 2 + + self.warmup = args.warmup + self.beta = args.beta + self.frontend_nms = args.frontend_nms + self.keyframe_thresh = args.keyframe_thresh + 
self.frontend_window = args.frontend_window + self.frontend_thresh = args.frontend_thresh + self.frontend_radius = args.frontend_radius + + def __update(self): + """ add edges, perform update """ + + self.count += 1 + self.t1 += 1 + + if self.graph.corr is not None: + self.graph.rm_factors(self.graph.age > self.max_age, store=True) + + self.graph.add_proximity_factors(self.t1-5, max(self.t1-self.frontend_window, 0), + rad=self.frontend_radius, nms=self.frontend_nms, thresh=self.frontend_thresh, beta=self.beta, remove=True) + + self.video.disps[self.t1-1] = torch.where(self.video.disps_sens[self.t1-1] > 0, + self.video.disps_sens[self.t1-1], self.video.disps[self.t1-1]) + + for itr in range(self.iters1): + self.graph.update(None, None, use_inactive=True) + + # set initial pose for next frame + poses = SE3(self.video.poses) + d = self.video.distance([self.t1-3], [self.t1-2], beta=self.beta, bidirectional=True) + + if d.item() < self.keyframe_thresh: + self.graph.rm_keyframe(self.t1 - 2) + + with self.video.get_lock(): + self.video.counter.value -= 1 + self.t1 -= 1 + + else: + for itr in range(self.iters2): + self.graph.update(None, None, use_inactive=True) + + # set pose for next itration + self.video.poses[self.t1] = self.video.poses[self.t1-1] + self.video.disps[self.t1] = self.video.disps[self.t1-1].mean() + + # update visualization + self.video.dirty[self.graph.ii.min():self.t1] = True + + def __initialize(self): + """ initialize the SLAM system """ + + self.t0 = 0 + self.t1 = self.video.counter.value + + self.graph.add_neighborhood_factors(self.t0, self.t1, r=3) + + for itr in range(8): + self.graph.update(1, use_inactive=True) + + self.graph.add_proximity_factors(0, 0, rad=2, nms=2, thresh=self.frontend_thresh, remove=False) + + for itr in range(8): + self.graph.update(1, use_inactive=True) + + + # self.video.normalize() + self.video.poses[self.t1] = self.video.poses[self.t1-1].clone() + self.video.disps[self.t1] = self.video.disps[self.t1-4:self.t1].mean() + + # initialization complete + self.is_initialized = True + self.last_pose = self.video.poses[self.t1-1].clone() + self.last_disp = self.video.disps[self.t1-1].clone() + self.last_time = self.video.tstamp[self.t1-1].clone() + + with self.video.get_lock(): + self.video.ready.value = 1 + self.video.dirty[:self.t1] = True + + self.graph.rm_factors(self.graph.ii < self.warmup-4, store=True) + + def __call__(self): + """ main update """ + + # do initialization + if not self.is_initialized and self.video.counter.value == self.warmup: + self.__initialize() + + # do update + elif self.is_initialized and self.t1 < self.video.counter.value: + self.__update() + + diff --git a/thirdparty/DROID-SLAM/droid_slam/droid_net.py b/thirdparty/DROID-SLAM/droid_slam/droid_net.py new file mode 100644 index 0000000000000000000000000000000000000000..2765ecddf5674f622cb9cbfa02043102e2366fb9 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/droid_net.py @@ -0,0 +1,226 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from collections import OrderedDict + +from modules.extractor import BasicEncoder +from modules.corr import CorrBlock +from modules.gru import ConvGRU +from modules.clipping import GradientClip + +from lietorch import SE3 +from geom.ba import BA + +import geom.projective_ops as pops +from geom.graph_utils import graph_to_edge_list, keyframe_indicies + +from torch_scatter import scatter_mean + + +def cvx_upsample(data, mask): + """ upsample pixel-wise transformation field """ + batch, ht, wd, dim = 
data.shape + data = data.permute(0, 3, 1, 2) + mask = mask.view(batch, 1, 9, 8, 8, ht, wd) + mask = torch.softmax(mask, dim=2) + + up_data = F.unfold(data, [3,3], padding=1) + up_data = up_data.view(batch, dim, 9, 1, 1, ht, wd) + + up_data = torch.sum(mask * up_data, dim=2) + up_data = up_data.permute(0, 4, 2, 5, 3, 1) + up_data = up_data.reshape(batch, 8*ht, 8*wd, dim) + + return up_data + +def upsample_disp(disp, mask): + batch, num, ht, wd = disp.shape + disp = disp.view(batch*num, ht, wd, 1) + mask = mask.view(batch*num, -1, ht, wd) + return cvx_upsample(disp, mask).view(batch, num, 8*ht, 8*wd) + + +class GraphAgg(nn.Module): + def __init__(self): + super(GraphAgg, self).__init__() + self.conv1 = nn.Conv2d(128, 128, 3, padding=1) + self.conv2 = nn.Conv2d(128, 128, 3, padding=1) + self.relu = nn.ReLU(inplace=True) + + self.eta = nn.Sequential( + nn.Conv2d(128, 1, 3, padding=1), + GradientClip(), + nn.Softplus()) + + self.upmask = nn.Sequential( + nn.Conv2d(128, 8*8*9, 1, padding=0)) + + def forward(self, net, ii): + batch, num, ch, ht, wd = net.shape + net = net.view(batch*num, ch, ht, wd) + + _, ix = torch.unique(ii, return_inverse=True) + net = self.relu(self.conv1(net)) + + net = net.view(batch, num, 128, ht, wd) + net = scatter_mean(net, ix, dim=1) + net = net.view(-1, 128, ht, wd) + + net = self.relu(self.conv2(net)) + + eta = self.eta(net).view(batch, -1, ht, wd) + upmask = self.upmask(net).view(batch, -1, 8*8*9, ht, wd) + + return .01 * eta, upmask + + +class UpdateModule(nn.Module): + def __init__(self): + super(UpdateModule, self).__init__() + cor_planes = 4 * (2*3 + 1)**2 + + self.corr_encoder = nn.Sequential( + nn.Conv2d(cor_planes, 128, 1, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(128, 128, 3, padding=1), + nn.ReLU(inplace=True)) + + self.flow_encoder = nn.Sequential( + nn.Conv2d(4, 128, 7, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(128, 64, 3, padding=1), + nn.ReLU(inplace=True)) + + self.weight = nn.Sequential( + nn.Conv2d(128, 128, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 2, 3, padding=1), + GradientClip(), + nn.Sigmoid()) + + self.delta = nn.Sequential( + nn.Conv2d(128, 128, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 2, 3, padding=1), + GradientClip()) + + self.gru = ConvGRU(128, 128+128+64) + self.agg = GraphAgg() + + def forward(self, net, inp, corr, flow=None, ii=None, jj=None, mask=None): + """ RaftSLAM update operator """ + + batch, num, ch, ht, wd = net.shape + + if flow is None: + flow = torch.zeros(batch, num, 4, ht, wd, device=net.device) + + output_dim = (batch, num, -1, ht, wd) + net = net.view(batch*num, -1, ht, wd) + inp = inp.view(batch*num, -1, ht, wd) + corr = corr.view(batch*num, -1, ht, wd) + flow = flow.view(batch*num, -1, ht, wd) + + corr = self.corr_encoder(corr) + flow = self.flow_encoder(flow) + net = self.gru(net, inp, corr, flow) + + ### update variables ### + delta = self.delta(net).view(*output_dim) + weight = self.weight(net).view(*output_dim) + + # print('Update') + # print('delta:', delta.shape) # [1,1,2,64,48] + # print('weight:', weight.shape) # [1,1,2,64,48] + + delta = delta.permute(0,1,3,4,2)[...,:2].contiguous() + weight = weight.permute(0,1,3,4,2)[...,:2].contiguous() + + net = net.view(*output_dim) + + if ii is not None: + eta, upmask = self.agg(net, ii.to(net.device)) + return net, delta, weight, eta, upmask + + else: + return net, delta, weight + + +class DroidNet(nn.Module): + def __init__(self): + super(DroidNet, self).__init__() + self.fnet = BasicEncoder(output_dim=128, 
norm_fn='instance') + self.cnet = BasicEncoder(output_dim=256, norm_fn='none') + self.update = UpdateModule() + + + def extract_features(self, images): + """ run feature extraction networks """ + + # normalize images + images = images[:, :, [2,1,0]] / 255.0 + mean = torch.as_tensor([0.485, 0.456, 0.406], device=images.device) + std = torch.as_tensor([0.229, 0.224, 0.225], device=images.device) + images = images.sub_(mean[:, None, None]).div_(std[:, None, None]) + + fmaps = self.fnet(images) + net = self.cnet(images) + + net, inp = net.split([128,128], dim=2) + net = torch.tanh(net) + inp = torch.relu(inp) + return fmaps, net, inp + + + def forward(self, Gs, images, disps, intrinsics, graph=None, num_steps=12, fixedp=2): + """ Estimates SE3 or Sim3 between pairs of frames """ + + u = keyframe_indicies(graph) + ii, jj, kk = graph_to_edge_list(graph) + + ii = ii.to(device=images.device, dtype=torch.long) + jj = jj.to(device=images.device, dtype=torch.long) + + fmaps, net, inp = self.extract_features(images) + net, inp = net[:,ii], inp[:,ii] + corr_fn = CorrBlock(fmaps[:,ii], fmaps[:,jj], num_levels=4, radius=3) + + ht, wd = images.shape[-2:] + coords0 = pops.coords_grid(ht//8, wd//8, device=images.device) + + coords1, _ = pops.projective_transform(Gs, disps, intrinsics, ii, jj) + target = coords1.clone() + + Gs_list, disp_list, residual_list = [], [], [] + for step in range(num_steps): + Gs = Gs.detach() + disps = disps.detach() + coords1 = coords1.detach() + target = target.detach() + + # extract motion features + corr = corr_fn(coords1) + resd = target - coords1 + flow = coords1 - coords0 + + motion = torch.cat([flow, resd], dim=-1) + motion = motion.permute(0,1,4,2,3).clamp(-64.0, 64.0) + + net, delta, weight, eta, upmask = \ + self.update(net, inp, corr, motion, ii, jj) + + target = coords1 + delta + + for i in range(2): + Gs, disps = BA(target, weight, eta, Gs, disps, intrinsics, ii, jj, fixedp=2) + + coords1, valid_mask = pops.projective_transform(Gs, disps, intrinsics, ii, jj) + residual = (target - coords1) + + Gs_list.append(Gs) + disp_list.append(upsample_disp(disps, upmask)) + residual_list.append(valid_mask * residual) + + + return Gs_list, disp_list, residual_list diff --git a/thirdparty/DROID-SLAM/droid_slam/factor_graph.py b/thirdparty/DROID-SLAM/droid_slam/factor_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..af7e7c58c0d915ed26be8fca316b799bb0d5bca4 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/factor_graph.py @@ -0,0 +1,397 @@ +import torch +import lietorch +import numpy as np + +import matplotlib.pyplot as plt +from lietorch import SE3 +from modules.corr import CorrBlock, AltCorrBlock +import geom.projective_ops as pops +from glob import glob + +class FactorGraph: + def __init__(self, video, update_op, device="cuda:0", corr_impl="volume", max_factors=-1, upsample=False): + self.video = video + self.update_op = update_op + self.device = device + self.max_factors = max_factors + self.corr_impl = corr_impl + self.upsample = upsample + + # operator at 1/8 resolution + self.ht = ht = video.ht // 8 + self.wd = wd = video.wd // 8 + + self.coords0 = pops.coords_grid(ht, wd, device=device) + self.ii = torch.as_tensor([], dtype=torch.long, device=device) + self.jj = torch.as_tensor([], dtype=torch.long, device=device) + self.age = torch.as_tensor([], dtype=torch.long, device=device) + + self.corr, self.net, self.inp = None, None, None + self.damping = 1e-6 * torch.ones_like(self.video.disps) + + self.target = torch.zeros([1, 0, ht, wd, 2],
device=device, dtype=torch.float) + self.weight = torch.zeros([1, 0, ht, wd, 2], device=device, dtype=torch.float) + + # inactive factors + self.ii_inac = torch.as_tensor([], dtype=torch.long, device=device) + self.jj_inac = torch.as_tensor([], dtype=torch.long, device=device) + self.ii_bad = torch.as_tensor([], dtype=torch.long, device=device) + self.jj_bad = torch.as_tensor([], dtype=torch.long, device=device) + + self.target_inac = torch.zeros([1, 0, ht, wd, 2], device=device, dtype=torch.float) + self.weight_inac = torch.zeros([1, 0, ht, wd, 2], device=device, dtype=torch.float) + + def __filter_repeated_edges(self, ii, jj): + """ remove duplicate edges """ + + keep = torch.zeros(ii.shape[0], dtype=torch.bool, device=ii.device) + eset = set( + [(i.item(), j.item()) for i, j in zip(self.ii, self.jj)] + + [(i.item(), j.item()) for i, j in zip(self.ii_inac, self.jj_inac)]) + + for k, (i, j) in enumerate(zip(ii, jj)): + keep[k] = (i.item(), j.item()) not in eset + + return ii[keep], jj[keep] + + def print_edges(self): + ii = self.ii.cpu().numpy() + jj = self.jj.cpu().numpy() + + ix = np.argsort(ii) + ii = ii[ix] + jj = jj[ix] + + w = torch.mean(self.weight, dim=[0,2,3,4]).cpu().numpy() + w = w[ix] + for e in zip(ii, jj, w): + print(e) + print() + + def filter_edges(self): + """ remove bad edges """ + conf = torch.mean(self.weight, dim=[0,2,3,4]) + mask = (torch.abs(self.ii-self.jj) > 2) & (conf < 0.001) + + self.ii_bad = torch.cat([self.ii_bad, self.ii[mask]]) + self.jj_bad = torch.cat([self.jj_bad, self.jj[mask]]) + self.rm_factors(mask, store=False) + + def clear_edges(self): + self.rm_factors(self.ii >= 0) + self.net = None + self.inp = None + + @torch.cuda.amp.autocast(enabled=True) + def add_factors(self, ii, jj, remove=False): + """ add edges to factor graph """ + + if not isinstance(ii, torch.Tensor): + ii = torch.as_tensor(ii, dtype=torch.long, device=self.device) + + if not isinstance(jj, torch.Tensor): + jj = torch.as_tensor(jj, dtype=torch.long, device=self.device) + + # remove duplicate edges + ii, jj = self.__filter_repeated_edges(ii, jj) + + + if ii.shape[0] == 0: + return + + # place limit on number of factors + if self.max_factors > 0 and self.ii.shape[0] + ii.shape[0] > self.max_factors \ + and self.corr is not None and remove: + + ix = torch.arange(len(self.age))[torch.argsort(self.age).cpu()] + self.rm_factors(ix >= self.max_factors - ii.shape[0], store=True) + + net = self.video.nets[ii].to(self.device).unsqueeze(0) + + # correlation volume for new edges + if self.corr_impl == "volume": + c = (ii == jj).long() + fmap1 = self.video.fmaps[ii,0].to(self.device).unsqueeze(0) + fmap2 = self.video.fmaps[jj,c].to(self.device).unsqueeze(0) + corr = CorrBlock(fmap1, fmap2) + self.corr = corr if self.corr is None else self.corr.cat(corr) + + inp = self.video.inps[ii].to(self.device).unsqueeze(0) + self.inp = inp if self.inp is None else torch.cat([self.inp, inp], 1) + + with torch.cuda.amp.autocast(enabled=False): + target, _ = self.video.reproject(ii, jj) + weight = torch.zeros_like(target) + + self.ii = torch.cat([self.ii, ii], 0) + self.jj = torch.cat([self.jj, jj], 0) + self.age = torch.cat([self.age, torch.zeros_like(ii)], 0) + + # reprojection factors + self.net = net if self.net is None else torch.cat([self.net, net], 1) + + self.target = torch.cat([self.target, target], 1) + self.weight = torch.cat([self.weight, weight], 1) + + @torch.cuda.amp.autocast(enabled=True) + def rm_factors(self, mask, store=False): + """ drop edges from factor graph """ + + # store estimated 
factors + if store: + self.ii_inac = torch.cat([self.ii_inac, self.ii[mask]], 0) + self.jj_inac = torch.cat([self.jj_inac, self.jj[mask]], 0) + self.target_inac = torch.cat([self.target_inac, self.target[:,mask]], 1) + self.weight_inac = torch.cat([self.weight_inac, self.weight[:,mask]], 1) + + self.ii = self.ii[~mask] + self.jj = self.jj[~mask] + self.age = self.age[~mask] + + if self.corr_impl == "volume": + self.corr = self.corr[~mask] + + if self.net is not None: + self.net = self.net[:,~mask] + + if self.inp is not None: + self.inp = self.inp[:,~mask] + + self.target = self.target[:,~mask] + self.weight = self.weight[:,~mask] + + + @torch.cuda.amp.autocast(enabled=True) + def rm_keyframe(self, ix): + """ remove keyframe ix and shift the remaining frames down """ + + + with self.video.get_lock(): + self.video.images[ix] = self.video.images[ix+1] + self.video.poses[ix] = self.video.poses[ix+1] + self.video.disps[ix] = self.video.disps[ix+1] + self.video.disps_sens[ix] = self.video.disps_sens[ix+1] + self.video.intrinsics[ix] = self.video.intrinsics[ix+1] + + self.video.nets[ix] = self.video.nets[ix+1] + self.video.inps[ix] = self.video.inps[ix+1] + self.video.fmaps[ix] = self.video.fmaps[ix+1] + self.video.tstamp[ix] = self.video.tstamp[ix+1] + self.video.masks[ix] = self.video.masks[ix+1] + + m = (self.ii_inac == ix) | (self.jj_inac == ix) + self.ii_inac[self.ii_inac >= ix] -= 1 + self.jj_inac[self.jj_inac >= ix] -= 1 + + if torch.any(m): + self.ii_inac = self.ii_inac[~m] + self.jj_inac = self.jj_inac[~m] + self.target_inac = self.target_inac[:,~m] + self.weight_inac = self.weight_inac[:,~m] + + m = (self.ii == ix) | (self.jj == ix) + + self.ii[self.ii >= ix] -= 1 + self.jj[self.jj >= ix] -= 1 + self.rm_factors(m, store=False) + + + @torch.cuda.amp.autocast(enabled=True) + def update(self, t0=None, t1=None, itrs=3, use_inactive=False, EP=1e-7, motion_only=False): + """ run update operator on factor graph """ + + # motion features + with torch.cuda.amp.autocast(enabled=False): + coords1, mask = self.video.reproject(self.ii, self.jj) + motn = torch.cat([coords1 - self.coords0, self.target - coords1], dim=-1) + motn = motn.permute(0,1,4,2,3).clamp(-64.0, 64.0) + + # correlation features + corr = self.corr(coords1) + self.net, delta, weight, damping, upmask = \ + self.update_op(self.net, self.inp, corr, motn, self.ii, self.jj) + + ##### save confidence weight for vis ##### + # for k in range(len(self.ii)): + # w = weight[:, k].detach().cpu().numpy() + # idx_i = self.ii[k] + # idx_j = self.jj[k] + # np.save(f'pred_conf/{idx_i:04d}_{idx_j:04d}.npy', w) + ############################################# + + # Shapes: + # weight: [1, k, h//8, w//8, 2] + # self.ii: [k]; self.jj: [k] + msk = self.video.masks[self.ii] > 0 + weight[:,msk] = 0.0 + + if t0 is None: + t0 = max(1, self.ii.min().item()+1) + + with torch.cuda.amp.autocast(enabled=False): + self.target = coords1 + delta.to(dtype=torch.float) + self.weight = weight.to(dtype=torch.float) + + ht, wd = self.coords0.shape[0:2] + self.damping[torch.unique(self.ii)] = damping + + if use_inactive: + m = (self.ii_inac >= t0 - 3) & (self.jj_inac >= t0 - 3) + ii = torch.cat([self.ii_inac[m], self.ii], 0) + jj = torch.cat([self.jj_inac[m], self.jj], 0) + target = torch.cat([self.target_inac[:,m], self.target], 1) + weight = torch.cat([self.weight_inac[:,m], self.weight], 1) + + else: + ii, jj, target, weight = self.ii, self.jj, self.target, self.weight + + + damping = .2 * self.damping[torch.unique(ii)].contiguous() + EP + + target = target.view(-1, ht, wd,
2).permute(0,3,1,2).contiguous() + weight = weight.view(-1, ht, wd, 2).permute(0,3,1,2).contiguous() + + # dense bundle adjustment + self.video.ba(target, weight, damping, ii, jj, t0, t1, + itrs=itrs, lm=1e-4, ep=0.1, motion_only=motion_only) + + if self.upsample: + self.video.upsample(torch.unique(self.ii), upmask) + + self.age += 1 + + + @torch.cuda.amp.autocast(enabled=False) + def update_lowmem(self, t0=None, t1=None, itrs=2, use_inactive=False, EP=1e-7, steps=8): + """ run update operator on factor graph - reduced memory implementation """ + + # alternate corr implementation + t = self.video.counter.value + + num, rig, ch, ht, wd = self.video.fmaps.shape + corr_op = AltCorrBlock(self.video.fmaps.view(1, num*rig, ch, ht, wd)) + + print("Global BA Iteration with {} steps".format(steps)) + for step in range(steps): + # print("Global BA Iteration #{}".format(step+1)) + with torch.cuda.amp.autocast(enabled=False): + coords1, mask = self.video.reproject(self.ii, self.jj) + motn = torch.cat([coords1 - self.coords0, self.target - coords1], dim=-1) + motn = motn.permute(0,1,4,2,3).clamp(-64.0, 64.0) + + s = 8 + for i in range(0, self.jj.max()+1, s): + v = (self.ii >= i) & (self.ii < i + s) + iis = self.ii[v] + jjs = self.jj[v] + + ht, wd = self.coords0.shape[0:2] + corr1 = corr_op(coords1[:,v], rig * iis, rig * jjs + (iis == jjs).long()) + + with torch.cuda.amp.autocast(enabled=True): + + net, delta, weight, damping, upmask = \ + self.update_op(self.net[:,v], self.video.inps[None,iis], corr1, motn[:,v], iis, jjs) + + if self.upsample: + self.video.upsample(torch.unique(iis), upmask) + + # Shapes: + # weight: [1, k, h//8, w//8, 2] + # self.ii: [k]; self.jj: [k] + msk = self.video.masks[iis] > 0 + weight[:,msk] = 0.0 + + self.net[:,v] = net + self.target[:,v] = coords1[:,v] + delta.float() + self.weight[:,v] = weight.float() + self.damping[torch.unique(iis)] = damping + + damping = .2 * self.damping[torch.unique(self.ii)].contiguous() + EP + target = self.target.view(-1, ht, wd, 2).permute(0,3,1,2).contiguous() + weight = self.weight.view(-1, ht, wd, 2).permute(0,3,1,2).contiguous() + + # dense bundle adjustment + self.video.ba(target, weight, damping, self.ii, self.jj, 1, t, + itrs=itrs, lm=1e-5, ep=1e-2, motion_only=False) + + self.video.dirty[:t] = True + + def add_neighborhood_factors(self, t0, t1, r=3): + """ add edges between neighboring frames within radius r """ + + ii, jj = torch.meshgrid(torch.arange(t0,t1), torch.arange(t0,t1), indexing='ij') + ii = ii.reshape(-1).to(dtype=torch.long, device=self.device) + jj = jj.reshape(-1).to(dtype=torch.long, device=self.device) + + c = 1 if self.video.stereo else 0 + + keep = ((ii - jj).abs() > c) & ((ii - jj).abs() <= r) + self.add_factors(ii[keep], jj[keep]) + + + def add_proximity_factors(self, t0=0, t1=0, rad=2, nms=2, beta=0.25, thresh=16.0, remove=False): + """ add edges to the factor graph based on distance """ + + t = self.video.counter.value + ix = torch.arange(t0, t) + jx = torch.arange(t1, t) + + ii, jj = torch.meshgrid(ix, jx, indexing='ij') + ii = ii.reshape(-1) + jj = jj.reshape(-1) + + d = self.video.distance(ii, jj, beta=beta) + d[ii - rad < jj] = np.inf + d[d > 100] = np.inf + + ii1 = torch.cat([self.ii, self.ii_bad, self.ii_inac], 0) + jj1 = torch.cat([self.jj, self.jj_bad, self.jj_inac], 0) + for i, j in zip(ii1.cpu().numpy(), jj1.cpu().numpy()): + for di in range(-nms, nms+1): + for dj in range(-nms, nms+1): + if abs(di) + abs(dj) <= max(min(abs(i-j)-2, nms), 0): + i1 = i + di + j1 = j + dj + + if (t0 <= i1 < t) and (t1 <= j1 
< t): + d[(i1-t0)*(t-t1) + (j1-t1)] = np.inf + + + es = [] + for i in range(t0, t): + if self.video.stereo: + es.append((i, i)) + d[(i-t0)*(t-t1) + (i-t1)] = np.inf + + for j in range(max(i-rad-1,0), i): + es.append((i,j)) + es.append((j,i)) + d[(i-t0)*(t-t1) + (j-t1)] = np.inf + + ix = torch.argsort(d) + for k in ix: + if d[k].item() > thresh: + continue + + if len(es) > self.max_factors: + break + + i = ii[k] + j = jj[k] + + # bidirectional + es.append((i, j)) + es.append((j, i)) + + for di in range(-nms, nms+1): + for dj in range(-nms, nms+1): + if abs(di) + abs(dj) <= max(min(abs(i-j)-2, nms), 0): + i1 = i + di + j1 = j + dj + + if (t0 <= i1 < t) and (t1 <= j1 < t): + d[(i1-t0)*(t-t1) + (j1-t1)] = np.inf + + ii, jj = torch.as_tensor(es, device=self.device).unbind(dim=-1) + self.add_factors(ii, jj, remove) diff --git a/thirdparty/DROID-SLAM/droid_slam/geom/__init__.py b/thirdparty/DROID-SLAM/droid_slam/geom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/thirdparty/DROID-SLAM/droid_slam/geom/ba.py b/thirdparty/DROID-SLAM/droid_slam/geom/ba.py new file mode 100644 index 0000000000000000000000000000000000000000..34c95c8a7910d5cbe288aeca2533f199580ab04c --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/geom/ba.py @@ -0,0 +1,158 @@ +import lietorch +import torch +import torch.nn.functional as F + +from .chol import block_solve, schur_solve +import geom.projective_ops as pops + +from torch_scatter import scatter_sum + + +# utility functions for scattering ops +def safe_scatter_add_mat(A, ii, jj, n, m): + v = (ii >= 0) & (jj >= 0) & (ii < n) & (jj < m) + return scatter_sum(A[:,v], ii[v]*m + jj[v], dim=1, dim_size=n*m) + +def safe_scatter_add_vec(b, ii, n): + v = (ii >= 0) & (ii < n) + return scatter_sum(b[:,v], ii[v], dim=1, dim_size=n) + +# apply retraction operator to inv-depth maps +def disp_retr(disps, dz, ii): + ii = ii.to(device=dz.device) + return disps + scatter_sum(dz, ii, dim=1, dim_size=disps.shape[1]) + +# apply retraction operator to poses +def pose_retr(poses, dx, ii): + ii = ii.to(device=dx.device) + return poses.retr(scatter_sum(dx, ii, dim=1, dim_size=poses.shape[1])) + + +def BA(target, weight, eta, poses, disps, intrinsics, ii, jj, fixedp=1, rig=1): + """ Full Bundle Adjustment """ + + B, P, ht, wd = disps.shape + N = ii.shape[0] + D = poses.manifold_dim + + ### 1: commpute jacobians and residuals ### + coords, valid, (Ji, Jj, Jz) = pops.projective_transform( + poses, disps, intrinsics, ii, jj, jacobian=True) + + r = (target - coords).view(B, N, -1, 1) + w = .001 * (valid * weight).view(B, N, -1, 1) + + ### 2: construct linear system ### + Ji = Ji.reshape(B, N, -1, D) + Jj = Jj.reshape(B, N, -1, D) + wJiT = (w * Ji).transpose(2,3) + wJjT = (w * Jj).transpose(2,3) + + Jz = Jz.reshape(B, N, ht*wd, -1) + + Hii = torch.matmul(wJiT, Ji) + Hij = torch.matmul(wJiT, Jj) + Hji = torch.matmul(wJjT, Ji) + Hjj = torch.matmul(wJjT, Jj) + + vi = torch.matmul(wJiT, r).squeeze(-1) + vj = torch.matmul(wJjT, r).squeeze(-1) + + Ei = (wJiT.view(B,N,D,ht*wd,-1) * Jz[:,:,None]).sum(dim=-1) + Ej = (wJjT.view(B,N,D,ht*wd,-1) * Jz[:,:,None]).sum(dim=-1) + + w = w.view(B, N, ht*wd, -1) + r = r.view(B, N, ht*wd, -1) + wk = torch.sum(w*r*Jz, dim=-1) + Ck = torch.sum(w*Jz*Jz, dim=-1) + + kx, kk = torch.unique(ii, return_inverse=True) + M = kx.shape[0] + + # only optimize keyframe poses + P = P // rig - fixedp + ii = ii // rig - fixedp + jj = jj // rig - fixedp + + H = safe_scatter_add_mat(Hii, ii, ii, P, P) + \ + 
safe_scatter_add_mat(Hij, ii, jj, P, P) + \ + safe_scatter_add_mat(Hji, jj, ii, P, P) + \ + safe_scatter_add_mat(Hjj, jj, jj, P, P) + + E = safe_scatter_add_mat(Ei, ii, kk, P, M) + \ + safe_scatter_add_mat(Ej, jj, kk, P, M) + + v = safe_scatter_add_vec(vi, ii, P) + \ + safe_scatter_add_vec(vj, jj, P) + + C = safe_scatter_add_vec(Ck, kk, M) + w = safe_scatter_add_vec(wk, kk, M) + + C = C + eta.view(*C.shape) + 1e-7 + + H = H.view(B, P, P, D, D) + E = E.view(B, P, M, D, ht*wd) + + ### 3: solve the system ### + dx, dz = schur_solve(H, E, C, v, w) + + ### 4: apply retraction ### + poses = pose_retr(poses, dx, torch.arange(P) + fixedp) + disps = disp_retr(disps, dz.view(B,-1,ht,wd), kx) + + disps = torch.where(disps > 10, torch.zeros_like(disps), disps) + disps = disps.clamp(min=0.0) + + return poses, disps + + +def MoBA(target, weight, eta, poses, disps, intrinsics, ii, jj, fixedp=1, rig=1): + """ Motion only bundle adjustment """ + + B, P, ht, wd = disps.shape + N = ii.shape[0] + D = poses.manifold_dim + + ### 1: commpute jacobians and residuals ### + coords, valid, (Ji, Jj, Jz) = pops.projective_transform( + poses, disps, intrinsics, ii, jj, jacobian=True) + + r = (target - coords).view(B, N, -1, 1) + w = .001 * (valid * weight).view(B, N, -1, 1) + + ### 2: construct linear system ### + Ji = Ji.reshape(B, N, -1, D) + Jj = Jj.reshape(B, N, -1, D) + wJiT = (w * Ji).transpose(2,3) + wJjT = (w * Jj).transpose(2,3) + + Hii = torch.matmul(wJiT, Ji) + Hij = torch.matmul(wJiT, Jj) + Hji = torch.matmul(wJjT, Ji) + Hjj = torch.matmul(wJjT, Jj) + + vi = torch.matmul(wJiT, r).squeeze(-1) + vj = torch.matmul(wJjT, r).squeeze(-1) + + # only optimize keyframe poses + P = P // rig - fixedp + ii = ii // rig - fixedp + jj = jj // rig - fixedp + + H = safe_scatter_add_mat(Hii, ii, ii, P, P) + \ + safe_scatter_add_mat(Hij, ii, jj, P, P) + \ + safe_scatter_add_mat(Hji, jj, ii, P, P) + \ + safe_scatter_add_mat(Hjj, jj, jj, P, P) + + v = safe_scatter_add_vec(vi, ii, P) + \ + safe_scatter_add_vec(vj, jj, P) + + H = H.view(B, P, P, D, D) + + ### 3: solve the system ### + dx = block_solve(H, v) + + ### 4: apply retraction ### + poses = pose_retr(poses, dx, torch.arange(P) + fixedp) + return poses + diff --git a/thirdparty/DROID-SLAM/droid_slam/geom/chol.py b/thirdparty/DROID-SLAM/droid_slam/geom/chol.py new file mode 100644 index 0000000000000000000000000000000000000000..86bc8f9b61f16d4faccab8a8d210065e2437f968 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/geom/chol.py @@ -0,0 +1,73 @@ +import torch +import torch.nn.functional as F +import geom.projective_ops as pops + +class CholeskySolver(torch.autograd.Function): + @staticmethod + def forward(ctx, H, b): + # don't crash training if cholesky decomp fails + try: + U = torch.linalg.cholesky(H) + xs = torch.cholesky_solve(b, U) + ctx.save_for_backward(U, xs) + ctx.failed = False + except Exception as e: + print(e) + ctx.failed = True + xs = torch.zeros_like(b) + + return xs + + @staticmethod + def backward(ctx, grad_x): + if ctx.failed: + return None, None + + U, xs = ctx.saved_tensors + dz = torch.cholesky_solve(grad_x, U) + dH = -torch.matmul(xs, dz.transpose(-1,-2)) + + return dH, dz + +def block_solve(H, b, ep=0.1, lm=0.0001): + """ solve normal equations """ + B, N, _, D, _ = H.shape + I = torch.eye(D).to(H.device) + H = H + (ep + lm*H) * I + + H = H.permute(0,1,3,2,4) + H = H.reshape(B, N*D, N*D) + b = b.reshape(B, N*D, 1) + + x = CholeskySolver.apply(H,b) + return x.reshape(B, N, D) + + +def schur_solve(H, E, C, v, w, ep=0.1, lm=0.0001, sless=False): + 
""" solve using shur complement """ + + B, P, M, D, HW = E.shape + H = H.permute(0,1,3,2,4).reshape(B, P*D, P*D) + E = E.permute(0,1,3,2,4).reshape(B, P*D, M*HW) + Q = (1.0 / C).view(B, M*HW, 1) + + # damping + I = torch.eye(P*D).to(H.device) + H = H + (ep + lm*H) * I + + v = v.reshape(B, P*D, 1) + w = w.reshape(B, M*HW, 1) + + Et = E.transpose(1,2) + S = H - torch.matmul(E, Q*Et) + v = v - torch.matmul(E, Q*w) + + dx = CholeskySolver.apply(S, v) + if sless: + return dx.reshape(B, P, D) + + dz = Q * (w - Et @ dx) + dx = dx.reshape(B, P, D) + dz = dz.reshape(B, M, HW) + + return dx, dz \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/droid_slam/geom/graph_utils.py b/thirdparty/DROID-SLAM/droid_slam/geom/graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..706fb67cdedc1b4d18b57ad65afb77fa8d23d5dc --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/geom/graph_utils.py @@ -0,0 +1,113 @@ + +import torch +import numpy as np +from collections import OrderedDict + +import lietorch +from data_readers.rgbd_utils import compute_distance_matrix_flow, compute_distance_matrix_flow2 + + +def graph_to_edge_list(graph): + ii, jj, kk = [], [], [] + for s, u in enumerate(graph): + for v in graph[u]: + ii.append(u) + jj.append(v) + kk.append(s) + + ii = torch.as_tensor(ii) + jj = torch.as_tensor(jj) + kk = torch.as_tensor(kk) + return ii, jj, kk + +def keyframe_indicies(graph): + return torch.as_tensor([u for u in graph]) + +def meshgrid(m, n, device='cuda'): + ii, jj = torch.meshgrid(torch.arange(m), torch.arange(n), indexing='ij') + return ii.reshape(-1).to(device), jj.reshape(-1).to(device) + +def neighbourhood_graph(n, r): + ii, jj = meshgrid(n, n) + d = (ii - jj).abs() + keep = (d >= 1) & (d <= r) + return ii[keep], jj[keep] + + +def build_frame_graph(poses, disps, intrinsics, num=16, thresh=24.0, r=2): + """ construct a frame graph between co-visible frames """ + N = poses.shape[1] + poses = poses[0].cpu().numpy() + disps = disps[0][:,3::8,3::8].cpu().numpy() + intrinsics = intrinsics[0].cpu().numpy() / 8.0 + d = compute_distance_matrix_flow(poses, disps, intrinsics) + + count = 0 + graph = OrderedDict() + + for i in range(N): + graph[i] = [] + d[i,i] = np.inf + for j in range(i-r, i+r+1): + if 0 <= j < N and i != j: + graph[i].append(j) + d[i,j] = np.inf + count += 1 + + while count < num: + ix = np.argmin(d) + i, j = ix // N, ix % N + + if d[i,j] < thresh: + graph[i].append(j) + d[i,j] = np.inf + count += 1 + else: + break + + return graph + + + +def build_frame_graph_v2(poses, disps, intrinsics, num=16, thresh=24.0, r=2): + """ construct a frame graph between co-visible frames """ + N = poses.shape[1] + # poses = poses[0].cpu().numpy() + # disps = disps[0].cpu().numpy() + # intrinsics = intrinsics[0].cpu().numpy() + d = compute_distance_matrix_flow2(poses, disps, intrinsics) + + # import matplotlib.pyplot as plt + # plt.imshow(d) + # plt.show() + + count = 0 + graph = OrderedDict() + + for i in range(N): + graph[i] = [] + d[i,i] = np.inf + for j in range(i-r, i+r+1): + if 0 <= j < N and i != j: + graph[i].append(j) + d[i,j] = np.inf + count += 1 + + while 1: + ix = np.argmin(d) + i, j = ix // N, ix % N + + if d[i,j] < thresh: + graph[i].append(j) + + for i1 in range(i-1, i+2): + for j1 in range(j-1, j+2): + if 0 <= i1 < N and 0 <= j1 < N: + d[i1, j1] = np.inf + + count += 1 + else: + break + + return graph + diff --git a/thirdparty/DROID-SLAM/droid_slam/geom/losses.py b/thirdparty/DROID-SLAM/droid_slam/geom/losses.py new file mode 100644 index 
0000000000000000000000000000000000000000..7fa91cf3939beabd4894dc3184cc7a6f32ff662c --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/geom/losses.py @@ -0,0 +1,118 @@ +from collections import OrderedDict +import numpy as np +import torch +from lietorch import SO3, SE3, Sim3 +from .graph_utils import graph_to_edge_list +from .projective_ops import projective_transform + + +def pose_metrics(dE): + """ Translation/Rotation/Scaling metrics from Sim3 """ + t, q, s = dE.data.split([3, 4, 1], -1) + ang = SO3(q).log().norm(dim=-1) + + # convert radians to degrees + r_err = (180 / np.pi) * ang + t_err = t.norm(dim=-1) + s_err = (s - 1.0).abs() + return r_err, t_err, s_err + + +def fit_scale(Ps, Gs): + b = Ps.shape[0] + t1 = Ps.data[...,:3].detach().reshape(b, -1) + t2 = Gs.data[...,:3].detach().reshape(b, -1) + + s = (t1*t2).sum(-1) / ((t2*t2).sum(-1) + 1e-8) + return s + + +def geodesic_loss(Ps, Gs, graph, gamma=0.9, do_scale=True): + """ Loss function for training network """ + + # relative pose + ii, jj, kk = graph_to_edge_list(graph) + dP = Ps[:,jj] * Ps[:,ii].inv() + + n = len(Gs) + geodesic_loss = 0.0 + + for i in range(n): + w = gamma ** (n - i - 1) + dG = Gs[i][:,jj] * Gs[i][:,ii].inv() + + if do_scale: + s = fit_scale(dP, dG) + dG = dG.scale(s[:,None]) + + # pose error + d = (dG * dP.inv()).log() + + if isinstance(dG, SE3): + tau, phi = d.split([3,3], dim=-1) + geodesic_loss += w * ( + tau.norm(dim=-1).mean() + + phi.norm(dim=-1).mean()) + + elif isinstance(dG, Sim3): + tau, phi, sig = d.split([3,3,1], dim=-1) + geodesic_loss += w * ( + tau.norm(dim=-1).mean() + + phi.norm(dim=-1).mean() + + 0.05 * sig.norm(dim=-1).mean()) + + dE = Sim3(dG * dP.inv()).detach() + r_err, t_err, s_err = pose_metrics(dE) + + metrics = { + 'rot_error': r_err.mean().item(), + 'tr_error': t_err.mean().item(), + 'bad_rot': (r_err < .1).float().mean().item(), + 'bad_tr': (t_err < .01).float().mean().item(), + } + + return geodesic_loss, metrics + + +def residual_loss(residuals, gamma=0.9): + """ loss on system residuals """ + residual_loss = 0.0 + n = len(residuals) + + for i in range(n): + w = gamma ** (n - i - 1) + residual_loss += w * residuals[i].abs().mean() + + return residual_loss, {'residual': residual_loss.item()} + + +def flow_loss(Ps, disps, poses_est, disps_est, intrinsics, graph, gamma=0.9): + """ optical flow loss """ + + N = Ps.shape[1] + graph = OrderedDict() + for i in range(N): + graph[i] = [j for j in range(N) if abs(i-j)==1] + + ii, jj, kk = graph_to_edge_list(graph) + coords0, val0 = projective_transform(Ps, disps, intrinsics, ii, jj) + val0 = val0 * (disps[:,ii] > 0).float().unsqueeze(dim=-1) + + n = len(poses_est) + flow_loss = 0.0 + + for i in range(n): + w = gamma ** (n - i - 1) + coords1, val1 = projective_transform(poses_est[i], disps_est[i], intrinsics, ii, jj) + + v = (val0 * val1).squeeze(dim=-1) + epe = v * (coords1 - coords0).norm(dim=-1) + flow_loss += w * epe.mean() + + epe = epe.reshape(-1)[v.reshape(-1) > 0.5] + metrics = { + 'f_error': epe.mean().item(), + '1px': (epe<1.0).float().mean().item(), + } + + return flow_loss, metrics diff --git a/thirdparty/DROID-SLAM/droid_slam/geom/projective_ops.py b/thirdparty/DROID-SLAM/droid_slam/geom/projective_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..82fc6100ed261649bea1edd820139d9c750841f8 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/geom/projective_ops.py @@ -0,0 +1,139 @@ +import torch +import torch.nn.functional as F + +from lietorch import SE3, Sim3 + +MIN_DEPTH = 0.2 + +def 
extract_intrinsics(intrinsics): + return intrinsics[...,None,None,:].unbind(dim=-1) + +def coords_grid(ht, wd, **kwargs): + y, x = torch.meshgrid( + torch.arange(ht).to(**kwargs).float(), + torch.arange(wd).to(**kwargs).float(), indexing='ij') + + return torch.stack([x, y], dim=-1) + +def iproj(disps, intrinsics, jacobian=False): + """ pinhole camera inverse projection """ + ht, wd = disps.shape[2:] + fx, fy, cx, cy = extract_intrinsics(intrinsics) + + y, x = torch.meshgrid( + torch.arange(ht).to(disps.device).float(), + torch.arange(wd).to(disps.device).float(), indexing='ij') + + i = torch.ones_like(disps) + X = (x - cx) / fx + Y = (y - cy) / fy + pts = torch.stack([X, Y, i, disps], dim=-1) + + if jacobian: + J = torch.zeros_like(pts) + J[...,-1] = 1.0 + return pts, J + + return pts, None + +def proj(Xs, intrinsics, jacobian=False, return_depth=False): + """ pinhole camera projection """ + fx, fy, cx, cy = extract_intrinsics(intrinsics) + X, Y, Z, D = Xs.unbind(dim=-1) + + Z = torch.where(Z < 0.5*MIN_DEPTH, torch.ones_like(Z), Z) + d = 1.0 / Z + + x = fx * (X * d) + cx + y = fy * (Y * d) + cy + if return_depth: + coords = torch.stack([x, y, D*d], dim=-1) + else: + coords = torch.stack([x, y], dim=-1) + + if jacobian: + B, N, H, W = d.shape + o = torch.zeros_like(d) + proj_jac = torch.stack([ + fx*d, o, -fx*X*d*d, o, + o, fy*d, -fy*Y*d*d, o, + # o, o, -D*d*d, d, + ], dim=-1).view(B, N, H, W, 2, 4) + + return coords, proj_jac + + return coords, None + +def actp(Gij, X0, jacobian=False): + """ action on point cloud """ + X1 = Gij[:,:,None,None] * X0 + + if jacobian: + X, Y, Z, d = X1.unbind(dim=-1) + o = torch.zeros_like(d) + B, N, H, W = d.shape + + if isinstance(Gij, SE3): + Ja = torch.stack([ + d, o, o, o, Z, -Y, + o, d, o, -Z, o, X, + o, o, d, Y, -X, o, + o, o, o, o, o, o, + ], dim=-1).view(B, N, H, W, 4, 6) + + elif isinstance(Gij, Sim3): + Ja = torch.stack([ + d, o, o, o, Z, -Y, X, + o, d, o, -Z, o, X, Y, + o, o, d, Y, -X, o, Z, + o, o, o, o, o, o, o + ], dim=-1).view(B, N, H, W, 4, 7) + + return X1, Ja + + return X1, None + +def projective_transform(poses, depths, intrinsics, ii, jj, jacobian=False, return_depth=False): + """ map points from ii->jj """ + + # inverse project (pinhole) + X0, Jz = iproj(depths[:,ii], intrinsics[:,ii], jacobian=jacobian) + + # transform + Gij = poses[:,jj] * poses[:,ii].inv() + + Gij.data[:,ii==jj] = torch.as_tensor([-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], device="cuda") + X1, Ja = actp(Gij, X0, jacobian=jacobian) + + # project (pinhole) + x1, Jp = proj(X1, intrinsics[:,jj], jacobian=jacobian, return_depth=return_depth) + + # exclude points too close to camera + valid = ((X1[...,2] > MIN_DEPTH) & (X0[...,2] > MIN_DEPTH)).float() + valid = valid.unsqueeze(-1) + + if jacobian: + # Ji transforms according to dual adjoint + Jj = torch.matmul(Jp, Ja) + Ji = -Gij[:,:,None,None,None].adjT(Jj) + + Jz = Gij[:,:,None,None] * Jz + Jz = torch.matmul(Jp, Jz.unsqueeze(-1)) + + return x1, valid, (Ji, Jj, Jz) + + return x1, valid + +def induced_flow(poses, disps, intrinsics, ii, jj): + """ optical flow induced by camera motion """ + + ht, wd = disps.shape[2:] + y, x = torch.meshgrid( + torch.arange(ht).to(disps.device).float(), + torch.arange(wd).to(disps.device).float(), indexing='ij') + + coords0 = torch.stack([x, y], dim=-1) + coords1, valid = projective_transform(poses, disps, intrinsics, ii, jj, False) + + return coords1[...,:2] - coords0, valid + diff --git a/thirdparty/DROID-SLAM/droid_slam/logger.py b/thirdparty/DROID-SLAM/droid_slam/logger.py new file mode 100644 
index 0000000000000000000000000000000000000000..3af7a2cb8f648dbb29abed182aa5eb99ceba015c --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/logger.py @@ -0,0 +1,54 @@ + +import torch +from torch.utils.tensorboard import SummaryWriter + + +SUM_FREQ = 100 + +class Logger: + def __init__(self, name, scheduler): + self.total_steps = 0 + self.running_loss = {} + self.writer = None + self.name = name + self.scheduler = scheduler + + def _print_training_status(self): + if self.writer is None: + self.writer = SummaryWriter('runs/%s' % self.name) + print([k for k in self.running_loss]) + + lr = self.scheduler.get_lr().pop() + metrics_data = [self.running_loss[k]/SUM_FREQ for k in self.running_loss.keys()] + training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, lr) + metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data) + + # print the training status + print(training_str + metrics_str) + + for key in self.running_loss: + val = self.running_loss[key] / SUM_FREQ + self.writer.add_scalar(key, val, self.total_steps) + self.running_loss[key] = 0.0 + + def push(self, metrics): + + for key in metrics: + if key not in self.running_loss: + self.running_loss[key] = 0.0 + + self.running_loss[key] += metrics[key] + + if self.total_steps % SUM_FREQ == SUM_FREQ-1: + self._print_training_status() + self.running_loss = {} + + self.total_steps += 1 + + def write_dict(self, results): + for key in results: + self.writer.add_scalar(key, results[key], self.total_steps) + + def close(self): + self.writer.close() + diff --git a/thirdparty/DROID-SLAM/droid_slam/modules/__init__.py b/thirdparty/DROID-SLAM/droid_slam/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/thirdparty/DROID-SLAM/droid_slam/modules/clipping.py b/thirdparty/DROID-SLAM/droid_slam/modules/clipping.py new file mode 100644 index 0000000000000000000000000000000000000000..e59307f533f7612920ef9eca10cc7ea0e871c928 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/modules/clipping.py @@ -0,0 +1,24 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +GRAD_CLIP = .01 + +class GradClip(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x + + @staticmethod + def backward(ctx, grad_x): + o = torch.zeros_like(grad_x) + grad_x = torch.where(grad_x.abs()>GRAD_CLIP, o, grad_x) + grad_x = torch.where(torch.isnan(grad_x), o, grad_x) + return grad_x + +class GradientClip(nn.Module): + def __init__(self): + super(GradientClip, self).__init__() + + def forward(self, x): + return GradClip.apply(x) \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/droid_slam/modules/corr.py b/thirdparty/DROID-SLAM/droid_slam/modules/corr.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1e32ad58fd18fa367a5c4897feb16aad7485c4 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/modules/corr.py @@ -0,0 +1,140 @@ +import torch +import torch.nn.functional as F + +import droid_backends + +class CorrSampler(torch.autograd.Function): + + @staticmethod + def forward(ctx, volume, coords, radius): + ctx.save_for_backward(volume,coords) + ctx.radius = radius + corr, = droid_backends.corr_index_forward(volume, coords, radius) + return corr + + @staticmethod + def backward(ctx, grad_output): + volume, coords = ctx.saved_tensors + grad_output = grad_output.contiguous() + grad_volume, = droid_backends.corr_index_backward(volume, coords, grad_output, ctx.radius) + return grad_volume, None, None + + +class 
CorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=3): + self.num_levels = num_levels + self.radius = radius + self.corr_pyramid = [] + + # all pairs correlation + corr = CorrBlock.corr(fmap1, fmap2) + + batch, num, h1, w1, h2, w2 = corr.shape + corr = corr.reshape(batch*num*h1*w1, 1, h2, w2) + + for i in range(self.num_levels): + self.corr_pyramid.append( + corr.view(batch*num, h1, w1, h2//2**i, w2//2**i)) + corr = F.avg_pool2d(corr, 2, stride=2) + + def __call__(self, coords): + out_pyramid = [] + batch, num, ht, wd, _ = coords.shape + coords = coords.permute(0,1,4,2,3) + coords = coords.contiguous().view(batch*num, 2, ht, wd) + + for i in range(self.num_levels): + corr = CorrSampler.apply(self.corr_pyramid[i], coords/2**i, self.radius) + out_pyramid.append(corr.view(batch, num, -1, ht, wd)) + + return torch.cat(out_pyramid, dim=2) + + def cat(self, other): + for i in range(self.num_levels): + self.corr_pyramid[i] = torch.cat([self.corr_pyramid[i], other.corr_pyramid[i]], 0) + return self + + def __getitem__(self, index): + for i in range(self.num_levels): + self.corr_pyramid[i] = self.corr_pyramid[i][index] + return self + + + @staticmethod + def corr(fmap1, fmap2): + """ all-pairs correlation """ + batch, num, dim, ht, wd = fmap1.shape + fmap1 = fmap1.reshape(batch*num, dim, ht*wd) / 4.0 + fmap2 = fmap2.reshape(batch*num, dim, ht*wd) / 4.0 + + corr = torch.matmul(fmap1.transpose(1,2), fmap2) + return corr.view(batch, num, ht, wd, ht, wd) + + +class CorrLayer(torch.autograd.Function): + @staticmethod + def forward(ctx, fmap1, fmap2, coords, r): + ctx.r = r + ctx.save_for_backward(fmap1, fmap2, coords) + corr, = droid_backends.altcorr_forward(fmap1, fmap2, coords, ctx.r) + return corr + + @staticmethod + def backward(ctx, grad_corr): + fmap1, fmap2, coords = ctx.saved_tensors + grad_corr = grad_corr.contiguous() + fmap1_grad, fmap2_grad, coords_grad = \ + droid_backends.altcorr_backward(fmap1, fmap2, coords, grad_corr, ctx.r) + return fmap1_grad, fmap2_grad, coords_grad, None + + +class AltCorrBlock: + def __init__(self, fmaps, num_levels=4, radius=3): + self.num_levels = num_levels + self.radius = radius + + B, N, C, H, W = fmaps.shape + fmaps = fmaps.view(B*N, C, H, W) / 4.0 + + self.pyramid = [] + for i in range(self.num_levels): + sz = (B, N, H//2**i, W//2**i, C) + fmap_lvl = fmaps.permute(0, 2, 3, 1).contiguous() + self.pyramid.append(fmap_lvl.view(*sz)) + fmaps = F.avg_pool2d(fmaps, 2, stride=2) + + def corr_fn(self, coords, ii, jj): + B, N, H, W, S, _ = coords.shape + coords = coords.permute(0, 1, 4, 2, 3, 5) + + corr_list = [] + for i in range(self.num_levels): + r = self.radius + fmap1_i = self.pyramid[0][:, ii] + fmap2_i = self.pyramid[i][:, jj] + + coords_i = (coords / 2**i).reshape(B*N, S, H, W, 2).contiguous() + fmap1_i = fmap1_i.reshape((B*N,) + fmap1_i.shape[2:]) + fmap2_i = fmap2_i.reshape((B*N,) + fmap2_i.shape[2:]) + + corr = CorrLayer.apply(fmap1_i.float(), fmap2_i.float(), coords_i, self.radius) + corr = corr.view(B, N, S, -1, H, W).permute(0, 1, 3, 4, 5, 2) + corr_list.append(corr) + + corr = torch.cat(corr_list, dim=2) + return corr + + + def __call__(self, coords, ii, jj): + squeeze_output = False + if len(coords.shape) == 5: + coords = coords.unsqueeze(dim=-2) + squeeze_output = True + + corr = self.corr_fn(coords, ii, jj) + + if squeeze_output: + corr = corr.squeeze(dim=-1) + + return corr.contiguous() + diff --git a/thirdparty/DROID-SLAM/droid_slam/modules/extractor.py b/thirdparty/DROID-SLAM/droid_slam/modules/extractor.py new file mode 100644 
index 0000000000000000000000000000000000000000..5807d45f7494033a54ec06acb651d46a86afb3dc --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/modules/extractor.py @@ -0,0 +1,198 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(BottleneckBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride) + self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes//4) + self.norm2 = nn.BatchNorm2d(planes//4) + self.norm3 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm4 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes//4) + self.norm2 = nn.InstanceNorm2d(planes//4) + self.norm3 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm4 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + self.norm3 = nn.Sequential() + if not stride == 1: + self.norm4 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + y = self.relu(self.norm3(self.conv3(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + + 
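# note: with conv1 (stride 2) plus the two stride-2 residual stages below, + # BasicEncoder downsamples by 8 in each spatial dimension, matching the + # 1/8-resolution factor graph; e.g. a [b, n, 3, 512, 384] image batch maps to + # [b, n, output_dim, 64, 48] features (the gmap/net/inp sizes noted in + # motion_filter.py)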
+DIM=32 + +class BasicEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, multidim=False): + super(BasicEncoder, self).__init__() + self.norm_fn = norm_fn + self.multidim = multidim + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=DIM) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(DIM) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(DIM) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, DIM, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = DIM + self.layer1 = self._make_layer(DIM, stride=1) + self.layer2 = self._make_layer(2*DIM, stride=2) + self.layer3 = self._make_layer(4*DIM, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(4*DIM, output_dim, kernel_size=1) + + if self.multidim: + self.layer4 = self._make_layer(256, stride=2) + self.layer5 = self._make_layer(512, stride=2) + + self.in_planes = 256 + self.layer6 = self._make_layer(256, stride=1) + + self.in_planes = 128 + self.layer7 = self._make_layer(128, stride=1) + + self.up1 = nn.Conv2d(512, 256, 1) + self.up2 = nn.Conv2d(256, 128, 1) + self.conv3 = nn.Conv2d(128, output_dim, kernel_size=1) + + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + else: + self.dropout = None + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + b, n, c1, h1, w1 = x.shape + x = x.view(b*n, c1, h1, w1) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.conv2(x) + + _, c2, h2, w2 = x.shape + return x.view(b, n, c2, h2, w2) diff --git a/thirdparty/DROID-SLAM/droid_slam/modules/gru.py b/thirdparty/DROID-SLAM/droid_slam/modules/gru.py new file mode 100644 index 0000000000000000000000000000000000000000..84ea5038fb45bbe8cfd92428d04ce5612d74d43a --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/modules/gru.py @@ -0,0 +1,34 @@ +import torch +import torch.nn as nn + + +class ConvGRU(nn.Module): + def __init__(self, h_planes=128, i_planes=128): + super(ConvGRU, self).__init__() + self.do_checkpoint = False + self.convz = nn.Conv2d(h_planes+i_planes, h_planes, 3, padding=1) + self.convr = nn.Conv2d(h_planes+i_planes, h_planes, 3, padding=1) + self.convq = nn.Conv2d(h_planes+i_planes, h_planes, 3, padding=1) + + self.w = nn.Conv2d(h_planes, h_planes, 1, padding=0) + + self.convz_glo = nn.Conv2d(h_planes, h_planes, 1, padding=0) + self.convr_glo = nn.Conv2d(h_planes, h_planes, 1, padding=0) + self.convq_glo = nn.Conv2d(h_planes, h_planes, 1, padding=0) + + def forward(self, net, *inputs): + inp = torch.cat(inputs, dim=1) + net_inp = torch.cat([net, inp], dim=1) + + b, c, h, w = net.shape + glo = torch.sigmoid(self.w(net)) * net + glo = glo.view(b, c, h*w).mean(-1).view(b, c, 1, 1) + + z = torch.sigmoid(self.convz(net_inp) + self.convz_glo(glo)) + r = torch.sigmoid(self.convr(net_inp) + self.convr_glo(glo)) + q = 
torch.tanh(self.convq(torch.cat([r*net, inp], dim=1)) + self.convq_glo(glo)) + + net = (1-z) * net + z * q + return net + + diff --git a/thirdparty/DROID-SLAM/droid_slam/motion_filter.py b/thirdparty/DROID-SLAM/droid_slam/motion_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..511c845961e108478c6edc566d4a2952780fbc75 --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/motion_filter.py @@ -0,0 +1,92 @@ +import cv2 +import torch +import lietorch + +from collections import OrderedDict +from droid_net import DroidNet + +import geom.projective_ops as pops +from modules.corr import CorrBlock + + +class MotionFilter: + """ This class is used to filter incoming frames and extract features """ + + def __init__(self, net, video, thresh=2.5, device="cuda:0"): + + # split net modules + self.cnet = net.cnet + self.fnet = net.fnet + self.update = net.update + + self.video = video + self.thresh = thresh + self.device = device + + self.count = 0 + + # mean, std for image normalization + self.MEAN = torch.as_tensor([0.485, 0.456, 0.406], device=self.device)[:, None, None] + self.STDV = torch.as_tensor([0.229, 0.224, 0.225], device=self.device)[:, None, None] + + @torch.cuda.amp.autocast(enabled=True) + def __context_encoder(self, image): + """ context features """ + net, inp = self.cnet(image).split([128,128], dim=2) + return net.tanh().squeeze(0), inp.relu().squeeze(0) + + @torch.cuda.amp.autocast(enabled=True) + def __feature_encoder(self, image): + """ features for correlation volume """ + return self.fnet(image).squeeze(0) + + @torch.cuda.amp.autocast(enabled=True) + @torch.no_grad() + def track(self, tstamp, image, depth=None, intrinsics=None, mask=None): + """ main update operation - run on every frame in video """ + + Id = lietorch.SE3.Identity(1,).data.squeeze() + ht = image.shape[-2] // 8 + wd = image.shape[-1] // 8 + + # normalize images + inputs = image[None, :, [2,1,0]].to(self.device) / 255.0 + inputs = inputs.sub_(self.MEAN).div_(self.STDV) + + # extract features + gmap = self.__feature_encoder(inputs) # [1, 128, gh, gw] + if mask is None: + mask = torch.zeros([gmap.shape[-2], gmap.shape[-1]]).to(gmap) + # if mask is not None: + # # bias = self.fnet.conv2.bias.detach().clone().half() + # # gmap[:,:,mask>0.0] = bias[:, None].repeat(1, (mask>0.0).sum()) + # gmap[:,:,mask>0.0] = 0 + + ### always add first frame to the depth video ### + if self.video.counter.value == 0: + net, inp = self.__context_encoder(inputs[:,[0]]) + self.net, self.inp, self.fmap = net, inp, gmap + self.video.append(tstamp, image[0], Id, 1.0, depth, intrinsics / 8.0, gmap, net[0,0], inp[0,0], mask) + # msk: torch.Size([64, 48]) + # gmap: torch.Size([1, 128, 64, 48]) + # net: torch.Size([1, 128, 64, 48]) + # inp: torch.Size([1, 128, 64, 48]) + + ### only add new frame if there is enough motion ### + else: + # index correlation volume + coords0 = pops.coords_grid(ht, wd, device=self.device)[None,None] + corr = CorrBlock(self.fmap[None,[0]], gmap[None,[0]])(coords0) + + # approximate flow magnitude using 1 update iteration + _, delta, weight = self.update(self.net[None], self.inp[None], corr) + + # check motion magnitue / add new frame to video + if delta.norm(dim=-1).mean().item() > self.thresh: + self.count = 0 + net, inp = self.__context_encoder(inputs[:,[0]]) + self.net, self.inp, self.fmap = net, inp, gmap + self.video.append(tstamp, image[0], None, None, depth, intrinsics / 8.0, gmap, net[0], inp[0], mask) + + else: + self.count += 1 diff --git 
a/thirdparty/DROID-SLAM/droid_slam/trajectory_filler.py b/thirdparty/DROID-SLAM/droid_slam/trajectory_filler.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec197e8a95106eeddead995ef3953d1a1458e6c --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/trajectory_filler.py @@ -0,0 +1,112 @@ +import cv2 +import torch +import lietorch + +from lietorch import SE3 +from collections import OrderedDict +from factor_graph import FactorGraph +from droid_net import DroidNet +import geom.projective_ops as pops + + +class PoseTrajectoryFiller: + """ This class is used to fill in non-keyframe poses """ + + def __init__(self, net, video, device="cuda:0"): + + # split net modules + self.cnet = net.cnet + self.fnet = net.fnet + self.update = net.update + + self.count = 0 + self.video = video + self.device = device + + # mean, std for image normalization + self.MEAN = torch.as_tensor([0.485, 0.456, 0.406], device=self.device)[:, None, None] + self.STDV = torch.as_tensor([0.229, 0.224, 0.225], device=self.device)[:, None, None] + + @torch.cuda.amp.autocast(enabled=True) + def __feature_encoder(self, image): + """ features for correlation volume """ + return self.fnet(image) + + def __fill(self, tstamps, images, intrinsics): + """ fill operator """ + + tt = torch.as_tensor(tstamps, device="cuda") + images = torch.stack(images, 0) + intrinsics = torch.stack(intrinsics, 0) + inputs = images[:,:,[2,1,0]].to(self.device) / 255.0 + + ### linear pose interpolation ### + N = self.video.counter.value # number of keyframes + M = len(tstamps) # 16 frames to fill + + ts = self.video.tstamp[:N] # tstamp of keyframes + Ps = SE3(self.video.poses[:N]) # pose of keyframes + + t0 = torch.as_tensor([ts[ts<=t].shape[0] - 1 for t in tstamps]) + t1 = torch.where(t0 < N-1, t0+1, t0) + + dt = ts[t1] - ts[t0] + 1e-3 + dP = Ps[t1] * Ps[t0].inv() + + v = dP.log() / dt.unsqueeze(-1) + w = v * (tt - ts[t0]).unsqueeze(-1) + Gs = SE3.exp(w) * Ps[t0] + + # extract features (no need for context features) + inputs = inputs.sub_(self.MEAN).div_(self.STDV) + fmap = self.__feature_encoder(inputs) + + # temporarily store the interpolated frames in the video buffer + self.video.counter.value += M + self.video[N:N+M] = (tt, images[:,0], Gs.data, 1, None, intrinsics / 8.0, fmap) + + # refine with motion-only bundle adjustment against the bracketing keyframes + graph = FactorGraph(self.video, self.update) + graph.add_factors(t0.cuda() + N, torch.arange(N, N+M).cuda()) + graph.add_factors(t1.cuda() + N, torch.arange(N, N+M).cuda()) + + for itr in range(6): + graph.update(N, N+M, motion_only=True) + + Gs = SE3(self.video.poses[N:N+M].clone()) + self.video.counter.value -= M + + return [Gs] + + @torch.no_grad() + def __call__(self, image_stream): + """ fill in poses of non-keyframe images """ + + # store all camera poses + pose_list = [] + + tstamps = [] + images = [] + intrinsics = [] + + for (tstamp, image, intrinsic) in image_stream: + tstamps.append(tstamp) + images.append(image) + intrinsics.append(intrinsic) + + if len(tstamps) == 16: + pose_list += self.__fill(tstamps, images, intrinsics) + tstamps, images, intrinsics = [], [], [] + + if len(tstamps) > 0: + pose_list += self.__fill(tstamps, images, intrinsics) + + # stitch pose segments together + return lietorch.cat(pose_list, 0) + diff --git a/thirdparty/DROID-SLAM/droid_slam/vis_headless.py b/thirdparty/DROID-SLAM/droid_slam/vis_headless.py new file mode 100644 index 0000000000000000000000000000000000000000..4595470ff225eb24e93bae3b77c2a36b3f4b94df --- /dev/null +++ b/thirdparty/DROID-SLAM/droid_slam/vis_headless.py @@ -0,0 +1,185 @@ +import torch +import cv2 +import lietorch +import droid_backends +import time +import argparse +import numpy as np + +# import os +# os.environ['PYOPENGL_PLATFORM'] = 'egl' +#os.environ['PYOPENGL_PLATFORM'] = 'osmesa' +import open3d as o3d + +# o3d.visualization.webrtc_server.enable_webrtc() + +from lietorch import SE3 +import geom.projective_ops as pops + + +CAM_POINTS = np.array([ + [ 0, 0, 0], + [-1, -1, 1.5], + [ 1, -1, 1.5], + [ 1, 1, 1.5], + [-1, 1, 1.5], + [-0.5, 1, 1.5], + [ 0.5, 1, 1.5], + [ 0, 1.2, 1.5]]) + +CAM_LINES = np.array([ + [1,2], [2,3], [3,4], [4,1], [1,0], [0,2], [3,0], [0,4], [5,7], [7,6]]) + +def white_balance(img): + # from https://stackoverflow.com/questions/46390779/automatic-white-balancing-with-grayworld-assumption + result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) + avg_a = np.average(result[:, :, 1]) + avg_b = np.average(result[:, :, 2]) + result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1) + result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1) + result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR) + return result + + +def create_camera_actor(g, scale=0.05): + """ build open3d camera polydata """ + camera_actor = o3d.geometry.LineSet( + points=o3d.utility.Vector3dVector(scale * CAM_POINTS), +
diff --git a/thirdparty/DROID-SLAM/droid_slam/vis_headless.py b/thirdparty/DROID-SLAM/droid_slam/vis_headless.py
new file mode 100644
index 0000000000000000000000000000000000000000..4595470ff225eb24e93bae3b77c2a36b3f4b94df
--- /dev/null
+++ b/thirdparty/DROID-SLAM/droid_slam/vis_headless.py
@@ -0,0 +1,185 @@
+import torch
+import cv2
+import lietorch
+import droid_backends
+import time
+import argparse
+import numpy as np
+
+# import os
+# os.environ['PYOPENGL_PLATFORM'] = 'egl'
+# os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
+import open3d as o3d
+
+# o3d.visualization.webrtc_server.enable_webrtc()
+
+from lietorch import SE3
+import geom.projective_ops as pops
+
+
+CAM_POINTS = np.array([
+    [ 0,   0,   0],
+    [-1,  -1, 1.5],
+    [ 1,  -1, 1.5],
+    [ 1,   1, 1.5],
+    [-1,   1, 1.5],
+    [-0.5, 1, 1.5],
+    [ 0.5, 1, 1.5],
+    [ 0, 1.2, 1.5]])
+
+CAM_LINES = np.array([
+    [1,2], [2,3], [3,4], [4,1], [1,0], [0,2], [3,0], [0,4], [5,7], [7,6]])
+
+def white_balance(img):
+    # from https://stackoverflow.com/questions/46390779/automatic-white-balancing-with-grayworld-assumption
+    result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
+    avg_a = np.average(result[:, :, 1])
+    avg_b = np.average(result[:, :, 2])
+    result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
+    result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
+    result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR)
+    return result
+
+
+def create_camera_actor(g, scale=0.05):
+    """ build open3d camera polydata """
+    camera_actor = o3d.geometry.LineSet(
+        points=o3d.utility.Vector3dVector(scale * CAM_POINTS),
+        lines=o3d.utility.Vector2iVector(CAM_LINES))
+
+    color = (g * 1.0, 0.5 * (1-g), 0.9 * (1-g))
+    camera_actor.paint_uniform_color(color)
+    return camera_actor
+
+
+def create_point_actor(points, colors):
+    """ open3d point cloud from numpy array """
+    point_cloud = o3d.geometry.PointCloud()
+    point_cloud.points = o3d.utility.Vector3dVector(points)
+    point_cloud.colors = o3d.utility.Vector3dVector(colors)
+    return point_cloud
+
+
+def droid_visualization(video, save_path, device="cuda:0"):
+    """ DROID visualization frontend """
+
+    torch.cuda.set_device(0)
+    droid_visualization.video = video
+    droid_visualization.cameras = {}
+    droid_visualization.points = {}
+    droid_visualization.warmup = 8
+    droid_visualization.scale = 1.0
+    droid_visualization.ix = 0
+    print("headless droid_visualization")
+
+    droid_visualization.filter_thresh = 0.3  # 0.005
+
+    def increase_filter(vis):
+        droid_visualization.filter_thresh *= 2
+        with droid_visualization.video.get_lock():
+            droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
+
+    def decrease_filter(vis):
+        droid_visualization.filter_thresh *= 0.5
+        with droid_visualization.video.get_lock():
+            droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
+
+    def animation_callback(vis):
+        cam = vis.get_view_control().convert_to_pinhole_camera_parameters()
+
+        with torch.no_grad():
+
+            with video.get_lock():
+                t = video.counter.value
+                dirty_index, = torch.where(video.dirty.clone())
+                dirty_index = dirty_index
+
+            if len(dirty_index) == 0:
+                return
+
+            video.dirty[dirty_index] = False
+
+            # convert poses to 4x4 matrix
+            poses = torch.index_select(video.poses, 0, dirty_index)
+            disps = torch.index_select(video.disps, 0, dirty_index)
+            Ps = SE3(poses).inv().matrix().cpu().numpy()
+
+            images = torch.index_select(video.images, 0, dirty_index)
+            images = images.cpu()[:,[2,1,0],3::8,3::8].permute(0,2,3,1) / 255.0
+            points = droid_backends.iproj(SE3(poses).inv().data, disps, video.intrinsics[0]).cpu()
+
+            thresh = droid_visualization.filter_thresh * torch.ones_like(disps.mean(dim=[1,2]))
+
+            count = droid_backends.depth_filter(
+                video.poses, video.disps, video.intrinsics[0], dirty_index, thresh)
+
+            count = count.cpu()
+            disps = disps.cpu()
+            masks = ((count >= 2) & (disps > .5*disps.mean(dim=[1,2], keepdim=True)))
+
+            for i in range(len(dirty_index)):
+                pose = Ps[i]
+                ix = dirty_index[i].item()
+
+                if ix in droid_visualization.cameras:
+                    vis.remove_geometry(droid_visualization.cameras[ix])
+                    del droid_visualization.cameras[ix]
+
+                if ix in droid_visualization.points:
+                    vis.remove_geometry(droid_visualization.points[ix])
+                    del droid_visualization.points[ix]
+
+                ### add camera actor ###
+                cam_actor = create_camera_actor(True)
+                cam_actor.transform(pose)
+                vis.add_geometry(cam_actor)
+                droid_visualization.cameras[ix] = cam_actor
+
+                mask = masks[i].reshape(-1)
+                pts = points[i].reshape(-1, 3)[mask].cpu().numpy()
+                clr = images[i].reshape(-1, 3)[mask].cpu().numpy()
+
+                ### add point actor ###
+                point_actor = create_point_actor(pts, clr)
+                vis.add_geometry(point_actor)
+                droid_visualization.points[ix] = point_actor
+
+            ### Hack to save point cloud data and camera results ###
+
+            # Save points
+            pcd_points = o3d.geometry.PointCloud()
+            for p in droid_visualization.points.items():
+                pcd_points += p[1]
+            o3d.io.write_point_cloud(f"{save_path}/points.ply", pcd_points, write_ascii=False)
+
+            # Save pose
+            pcd_camera = create_camera_actor(True)
+            for c in droid_visualization.cameras.items():
+                pcd_camera += c[1]
+
+            o3d.io.write_line_set(f"{save_path}/camera.ply", pcd_camera, write_ascii=False)
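+
+            # Note (added): the merged point cloud and camera wireframes are rewritten
+            # on every callback, so a headless run keeps up-to-date points.ply and
+            # camera.ply snapshots under save_path.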
+
+            ### end ###
+
+            # hack to allow interacting with visualization during inference
+            if len(droid_visualization.cameras) >= droid_visualization.warmup:
+                cam = vis.get_view_control().convert_from_pinhole_camera_parameters(cam)
+
+            droid_visualization.ix += 1
+            vis.poll_events()
+            vis.update_renderer()
+
+    ### create Open3D visualization ###
+    vis = o3d.visualization.VisualizerWithKeyCallback()
+    vis.register_animation_callback(animation_callback)
+    vis.register_key_callback(ord("S"), increase_filter)
+    vis.register_key_callback(ord("A"), decrease_filter)
+
+    vis.create_window(height=540, width=960)
+    # vis.create_window(height=512, width=384)
+    vis.get_render_option().load_from_json("thirdparty/DROID-SLAM/misc/renderoption.json")
+
+    vis.run()
+    vis.destroy_window()
diff --git a/thirdparty/DROID-SLAM/droid_slam/visualization.py b/thirdparty/DROID-SLAM/droid_slam/visualization.py
new file mode 100644
index 0000000000000000000000000000000000000000..208ad32debe055fdba7f325ee9c97bd6def40e91
--- /dev/null
+++ b/thirdparty/DROID-SLAM/droid_slam/visualization.py
@@ -0,0 +1,189 @@
+import torch
+import cv2
+import lietorch
+import droid_backends
+import time
+import argparse
+import numpy as np
+import open3d as o3d
+import open3d.visualization.gui as gui  # required by the file-dialog export below
+
+from lietorch import SE3
+import geom.projective_ops as pops
+
+CAM_POINTS = np.array([
+    [ 0,   0,   0],
+    [-1,  -1, 1.5],
+    [ 1,  -1, 1.5],
+    [ 1,   1, 1.5],
+    [-1,   1, 1.5],
+    [-0.5, 1, 1.5],
+    [ 0.5, 1, 1.5],
+    [ 0, 1.2, 1.5]])
+
+CAM_LINES = np.array([
+    [1,2], [2,3], [3,4], [4,1], [1,0], [0,2], [3,0], [0,4], [5,7], [7,6]])
+
+def white_balance(img):
+    # from https://stackoverflow.com/questions/46390779/automatic-white-balancing-with-grayworld-assumption
+    result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
+    avg_a = np.average(result[:, :, 1])
+    avg_b = np.average(result[:, :, 2])
+    result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
+    result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
+    result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR)
+    return result
+
+def create_camera_actor(g, scale=0.05):
+    """ build open3d camera polydata """
+    camera_actor = o3d.geometry.LineSet(
+        points=o3d.utility.Vector3dVector(scale * CAM_POINTS),
+        lines=o3d.utility.Vector2iVector(CAM_LINES))
+
+    color = (g * 1.0, 0.5 * (1-g), 0.9 * (1-g))
+    camera_actor.paint_uniform_color(color)
+    return camera_actor
+
+def create_point_actor(points, colors):
+    """ open3d point cloud from numpy array """
+    point_cloud = o3d.geometry.PointCloud()
+    point_cloud.points = o3d.utility.Vector3dVector(points)
+    point_cloud.colors = o3d.utility.Vector3dVector(colors)
+    return point_cloud

+def droid_visualization(video, device="cuda:0"):
+    """ DROID visualization frontend """
+
+    torch.cuda.set_device(device)
+    droid_visualization.video = video
+    droid_visualization.cameras = {}
+    droid_visualization.points = {}
+    droid_visualization.warmup = 8
+    droid_visualization.scale = 1.0
+    droid_visualization.ix = 0
+
+    droid_visualization.filter_thresh = 0.005
+
+    def increase_filter(vis):
+        droid_visualization.filter_thresh *= 2
+        with droid_visualization.video.get_lock():
+            droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
+
+    def decrease_filter(vis):
+        droid_visualization.filter_thresh *= 0.5
+        with droid_visualization.video.get_lock():
+            droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
+
+    # file-dialog based point cloud export (added)
+    def export_pointcloud(vis):
+        gui.Application.instance.initialize()
+        window = gui.Application.instance.create_window("Export", 350, 600)
+
+        def _on_filedlg_cancel():
+            window.close_dialog()
+            window.close()
+            gui.Application.instance.quit()
+
+        def _on_filedlg_done(path):
+            pcd_export(path)
+            window.close_dialog()
+            gui.Application.instance.quit()
+
+        def exec_file_dialog():
+            filedlg = gui.FileDialog(gui.FileDialog.SAVE, "Select file", window.theme)
+            filedlg.add_filter(".ply .xyz .pcd", "PointCloud (.xyz .ply .pcd)")
+            filedlg.add_filter("", "All files")
+            filedlg.set_on_cancel(_on_filedlg_cancel)
+            filedlg.set_on_done(_on_filedlg_done)
+            window.show_dialog(filedlg)
+
+        def pcd_export(path):
+            print("\nExporting pointcloud as", path)
+            final_pcd = o3d.geometry.PointCloud()
+            for p in droid_visualization.points.items():
+                final_pcd += p[1]
+
+            o3d.io.write_point_cloud(path, final_pcd, write_ascii=False)
+            # vis.capture_depth_point_cloud("/home/bertuser/droidslam_export.ply")
+
+        exec_file_dialog()
+
+    def animation_callback(vis):
+        cam = vis.get_view_control().convert_to_pinhole_camera_parameters()
+
+        with torch.no_grad():
+
+            with video.get_lock():
+                t = video.counter.value
+                dirty_index, = torch.where(video.dirty.clone())
+                dirty_index = dirty_index
+
+            if len(dirty_index) == 0:
+                return
+
+            video.dirty[dirty_index] = False
+
+            # convert poses to 4x4 matrix
+            poses = torch.index_select(video.poses, 0, dirty_index)
+            disps = torch.index_select(video.disps, 0, dirty_index)
+            Ps = SE3(poses).inv().matrix().cpu().numpy()
+
+            images = torch.index_select(video.images, 0, dirty_index)
+            images = images.cpu()[:,[2,1,0],3::8,3::8].permute(0,2,3,1) / 255.0
+            points = droid_backends.iproj(SE3(poses).inv().data, disps, video.intrinsics[0]).cpu()
+
+            thresh = droid_visualization.filter_thresh * torch.ones_like(disps.mean(dim=[1,2]))
+
+            count = droid_backends.depth_filter(
+                video.poses, video.disps, video.intrinsics[0], dirty_index, thresh)
+
+            count = count.cpu()
+            disps = disps.cpu()
+            masks = ((count >= 2) & (disps > .5*disps.mean(dim=[1,2], keepdim=True)))
+
+            for i in range(len(dirty_index)):
+                pose = Ps[i]
+                ix = dirty_index[i].item()
+
+                if ix in droid_visualization.cameras:
+                    vis.remove_geometry(droid_visualization.cameras[ix])
+                    del droid_visualization.cameras[ix]
+
+                if ix in droid_visualization.points:
+                    vis.remove_geometry(droid_visualization.points[ix])
+                    del droid_visualization.points[ix]
+
+                ### add camera actor ###
+                cam_actor = create_camera_actor(True)
+                cam_actor.transform(pose)
+                vis.add_geometry(cam_actor)
+                droid_visualization.cameras[ix] = cam_actor
+
+                mask = masks[i].reshape(-1)
+                pts = points[i].reshape(-1, 3)[mask].cpu().numpy()
+                clr = images[i].reshape(-1, 3)[mask].cpu().numpy()
+
+                ### add point actor ###
+                point_actor = create_point_actor(pts, clr)
+                vis.add_geometry(point_actor)
+                droid_visualization.points[ix] = point_actor
+
+            # hack to allow interacting with visualization during inference
+            if len(droid_visualization.cameras) >= droid_visualization.warmup:
+                cam = vis.get_view_control().convert_from_pinhole_camera_parameters(cam)
+
+            droid_visualization.ix += 1
+            vis.poll_events()
+            vis.update_renderer()
+
+    ### create Open3D visualization ###
+    vis = o3d.visualization.VisualizerWithKeyCallback()
+    vis.register_animation_callback(animation_callback)
+    vis.register_key_callback(ord("S"), increase_filter)
+    vis.register_key_callback(ord("A"), decrease_filter)
+
+    vis.create_window(height=540, width=960)
+
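+    # Note (added): only "S" (double the depth-filter threshold) and "A" (halve it) are
+    # bound here; the file-dialog export helper above is defined but not attached to a
+    # key, and renderoption.json is resolved relative to the working directory.
+    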
vis.get_render_option().load_from_json("misc/renderoption.json") + + vis.run() + vis.destroy_window() diff --git a/thirdparty/DROID-SLAM/environment.yaml b/thirdparty/DROID-SLAM/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f92153acc588328bd9784dc329febe863cf68b88 --- /dev/null +++ b/thirdparty/DROID-SLAM/environment.yaml @@ -0,0 +1,22 @@ +name: droidenv +channels: + - rusty1s + - pytorch + - open3d-admin + - nvidia + - conda-forge + - defaults +dependencies: + - pytorch-scatter + - torchaudio + - torchvision + - open3d + - pytorch=1.10 + - cudatoolkit=11.3 + - tensorboard + - scipy + - opencv + - tqdm + - suitesparse + - matplotlib + - pyyaml diff --git a/thirdparty/DROID-SLAM/environment_novis.yaml b/thirdparty/DROID-SLAM/environment_novis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e6653f5eccc005894f3c29f35c28f085b9b05d68 --- /dev/null +++ b/thirdparty/DROID-SLAM/environment_novis.yaml @@ -0,0 +1,20 @@ +name: droidenv +channels: + - rusty1s + - pytorch + - nvidia + - conda-forge + - defaults +dependencies: + - pytorch-scatter + - torchaudio + - torchvision + - pytorch=1.10 + - cudatoolkit=11.3 + - tensorboard + - scipy + - opencv + - tqdm + - suitesparse + - matplotlib + - pyyaml diff --git a/thirdparty/DROID-SLAM/evaluation_scripts/test_eth3d.py b/thirdparty/DROID-SLAM/evaluation_scripts/test_eth3d.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9e3186a8c583bdaf8e98b1f49ecb9a0bf67fa8 --- /dev/null +++ b/thirdparty/DROID-SLAM/evaluation_scripts/test_eth3d.py @@ -0,0 +1,134 @@ +import sys +sys.path.append('droid_slam') + +from tqdm import tqdm +import numpy as np +import torch +import lietorch +import cv2 +import os +import glob +import time +import argparse + +import torch.nn.functional as F +from droid import Droid + +import matplotlib.pyplot as plt + + +def show_image(image): + image = image.permute(1, 2, 0).cpu().numpy() + cv2.imshow('image', image / 255.0) + cv2.waitKey(1) + +def image_stream(datapath, use_depth=False, stride=1): + """ image generator """ + + fx, fy, cx, cy = np.loadtxt(os.path.join(datapath, 'calibration.txt')).tolist() + image_list = sorted(glob.glob(os.path.join(datapath, 'rgb', '*.png')))[::stride] + depth_list = sorted(glob.glob(os.path.join(datapath, 'depth', '*.png')))[::stride] + + for t, (image_file, depth_file) in enumerate(zip(image_list, depth_list)): + image = cv2.imread(image_file) + depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH) / 5000.0 + + h0, w0, _ = image.shape + h1 = int(h0 * np.sqrt((384 * 512) / (h0 * w0))) + w1 = int(w0 * np.sqrt((384 * 512) / (h0 * w0))) + + image = cv2.resize(image, (w1, h1)) + image = image[:h1-h1%8, :w1-w1%8] + image = torch.as_tensor(image).permute(2, 0, 1) + + depth = torch.as_tensor(depth) + depth = F.interpolate(depth[None,None], (h1, w1)).squeeze() + depth = depth[:h1-h1%8, :w1-w1%8] + + intrinsics = torch.as_tensor([fx, fy, cx, cy]) + intrinsics[0::2] *= (w1 / w0) + intrinsics[1::2] *= (h1 / h0) + + if use_depth: + yield t, image[None], depth, intrinsics + + else: + yield t, image[None], intrinsics + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--datapath") + parser.add_argument("--weights", default="droid.pth") + parser.add_argument("--buffer", type=int, default=1024) + parser.add_argument("--image_size", default=[240, 320]) + parser.add_argument("--disable_vis", action="store_true") + + parser.add_argument("--beta", type=float, default=0.5) + 
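# Note (added): beta blends the two frame-distance terms in the CUDA kernel
+    # (full reprojection flow vs. translation-only flow); filter_thresh is the mean
+    # flow, in pixels at 1/8 resolution, a frame must induce before the motion
+    # filter keeps it as a keyframe.
+    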
parser.add_argument("--filter_thresh", type=float, default=2.0) + parser.add_argument("--warmup", type=int, default=8) + parser.add_argument("--keyframe_thresh", type=float, default=3.5) + parser.add_argument("--frontend_thresh", type=float, default=16.0) + parser.add_argument("--frontend_window", type=int, default=16) + parser.add_argument("--frontend_radius", type=int, default=1) + parser.add_argument("--frontend_nms", type=int, default=0) + + parser.add_argument("--stereo", action="store_true") + parser.add_argument("--depth", action="store_true") + + parser.add_argument("--backend_thresh", type=float, default=22.0) + parser.add_argument("--backend_radius", type=int, default=2) + parser.add_argument("--backend_nms", type=int, default=3) + args = parser.parse_args() + + torch.multiprocessing.set_start_method('spawn') + + print("Running evaluation on {}".format(args.datapath)) + print(args) + + # this can usually be set to 2-3 except for "camera_shake" scenes + # set to 2 for test scenes + stride = 1 + + tstamps = [] + for (t, image, depth, intrinsics) in tqdm(image_stream(args.datapath, use_depth=True, stride=stride)): + if not args.disable_vis: + show_image(image[0]) + + if t == 0: + args.image_size = [image.shape[2], image.shape[3]] + droid = Droid(args) + + droid.track(t, image, depth, intrinsics=intrinsics) + + traj_est = droid.terminate(image_stream(args.datapath, use_depth=False, stride=stride)) + + ### run evaluation ### + + print("#"*20 + " Results...") + + import evo + from evo.core.trajectory import PoseTrajectory3D + from evo.tools import file_interface + from evo.core import sync + import evo.main_ape as main_ape + from evo.core.metrics import PoseRelation + + image_path = os.path.join(args.datapath, 'rgb') + images_list = sorted(glob.glob(os.path.join(image_path, '*.png')))[::stride] + tstamps = [float(x.split('/')[-1][:-4]) for x in images_list] + + traj_est = PoseTrajectory3D( + positions_xyz=traj_est[:,:3], + orientations_quat_wxyz=traj_est[:,3:], + timestamps=np.array(tstamps)) + + gt_file = os.path.join(args.datapath, 'groundtruth.txt') + traj_ref = file_interface.read_tum_trajectory_file(gt_file) + + traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est) + + result = main_ape.ape(traj_ref, traj_est, est_name='traj', + pose_relation=PoseRelation.translation_part, align=True, correct_scale=False) + + print(result.stats) + diff --git a/thirdparty/DROID-SLAM/evaluation_scripts/test_euroc.py b/thirdparty/DROID-SLAM/evaluation_scripts/test_euroc.py new file mode 100644 index 0000000000000000000000000000000000000000..d0214f5f209a5c103732114ea2f012e0454f5775 --- /dev/null +++ b/thirdparty/DROID-SLAM/evaluation_scripts/test_euroc.py @@ -0,0 +1,142 @@ +import sys +sys.path.append('droid_slam') + +from tqdm import tqdm +import numpy as np +import torch +import lietorch +import cv2 +import os +import glob +import time +import argparse + +from torch.multiprocessing import Process +from droid import Droid + +import torch.nn.functional as F + + + +def show_image(image): + image = image.permute(1, 2, 0).cpu().numpy() + cv2.imshow('image', image / 255.0) + cv2.waitKey(1) + +def image_stream(datapath, image_size=[320, 512], stereo=False, stride=1): + """ image generator """ + + K_l = np.array([458.654, 0.0, 367.215, 0.0, 457.296, 248.375, 0.0, 0.0, 1.0]).reshape(3,3) + d_l = np.array([-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05, 0.0]) + R_l = np.array([ + 0.999966347530033, -0.001422739138722922, 0.008079580483432283, + 0.001365741834644127, 0.9999741760894847, 
0.007055629199258132, + -0.008089410156878961, -0.007044357138835809, 0.9999424675829176 + ]).reshape(3,3) + + P_l = np.array([435.2046959714599, 0, 367.4517211914062, 0, 0, 435.2046959714599, 252.2008514404297, 0, 0, 0, 1, 0]).reshape(3,4) + map_l = cv2.initUndistortRectifyMap(K_l, d_l, R_l, P_l[:3,:3], (752, 480), cv2.CV_32F) + + K_r = np.array([457.587, 0.0, 379.999, 0.0, 456.134, 255.238, 0.0, 0.0, 1]).reshape(3,3) + d_r = np.array([-0.28368365, 0.07451284, -0.00010473, -3.555907e-05, 0.0]).reshape(5) + R_r = np.array([ + 0.9999633526194376, -0.003625811871560086, 0.007755443660172947, + 0.003680398547259526, 0.9999684752771629, -0.007035845251224894, + -0.007729688520722713, 0.007064130529506649, 0.999945173484644 + ]).reshape(3,3) + + P_r = np.array([435.2046959714599, 0, 367.4517211914062, -47.90639384423901, 0, 435.2046959714599, 252.2008514404297, 0, 0, 0, 1, 0]).reshape(3,4) + map_r = cv2.initUndistortRectifyMap(K_r, d_r, R_r, P_r[:3,:3], (752, 480), cv2.CV_32F) + + intrinsics_vec = [435.2046959714599, 435.2046959714599, 367.4517211914062, 252.2008514404297] + ht0, wd0 = [480, 752] + + # read all png images in folder + images_left = sorted(glob.glob(os.path.join(datapath, 'mav0/cam0/data/*.png')))[::stride] + images_right = [x.replace('cam0', 'cam1') for x in images_left] + + for t, (imgL, imgR) in enumerate(zip(images_left, images_right)): + if stereo and not os.path.isfile(imgR): + continue + tstamp = float(imgL.split('/')[-1][:-4]) + images = [cv2.remap(cv2.imread(imgL), map_l[0], map_l[1], interpolation=cv2.INTER_LINEAR)] + if stereo: + images += [cv2.remap(cv2.imread(imgR), map_r[0], map_r[1], interpolation=cv2.INTER_LINEAR)] + + images = torch.from_numpy(np.stack(images, 0)) + images = images.permute(0, 3, 1, 2).to("cuda:0", dtype=torch.float32) + images = F.interpolate(images, image_size, mode="bilinear", align_corners=False) + + intrinsics = torch.as_tensor(intrinsics_vec).cuda() + intrinsics[0] *= image_size[1] / wd0 + intrinsics[1] *= image_size[0] / ht0 + intrinsics[2] *= image_size[1] / wd0 + intrinsics[3] *= image_size[0] / ht0 + + yield stride*t, images, intrinsics + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--datapath", help="path to euroc sequence") + parser.add_argument("--gt", help="path to gt file") + parser.add_argument("--weights", default="droid.pth") + parser.add_argument("--buffer", type=int, default=512) + parser.add_argument("--image_size", default=[320,512]) + parser.add_argument("--disable_vis", action="store_true") + parser.add_argument("--stereo", action="store_true") + + parser.add_argument("--beta", type=float, default=0.3) + parser.add_argument("--filter_thresh", type=float, default=2.4) + parser.add_argument("--warmup", type=int, default=15) + parser.add_argument("--keyframe_thresh", type=float, default=3.5) + parser.add_argument("--frontend_thresh", type=float, default=17.5) + parser.add_argument("--frontend_window", type=int, default=20) + parser.add_argument("--frontend_radius", type=int, default=2) + parser.add_argument("--frontend_nms", type=int, default=1) + + parser.add_argument("--backend_thresh", type=float, default=24.0) + parser.add_argument("--backend_radius", type=int, default=2) + parser.add_argument("--backend_nms", type=int, default=2) + args = parser.parse_args() + + torch.multiprocessing.set_start_method('spawn') + + print("Running evaluation on {}".format(args.datapath)) + print(args) + + droid = Droid(args) + time.sleep(5) + + for (t, image, intrinsics) in 
tqdm(image_stream(args.datapath, stereo=args.stereo, stride=2)): + droid.track(t, image, intrinsics=intrinsics) + + traj_est = droid.terminate(image_stream(args.datapath, stride=1)) + + ### run evaluation ### + + import evo + from evo.core.trajectory import PoseTrajectory3D + from evo.tools import file_interface + from evo.core import sync + import evo.main_ape as main_ape + from evo.core.metrics import PoseRelation + + images_list = sorted(glob.glob(os.path.join(args.datapath, 'mav0/cam0/data/*.png'))) + tstamps = [float(x.split('/')[-1][:-4]) for x in images_list] + + traj_est = PoseTrajectory3D( + positions_xyz=1.10 * traj_est[:,:3], + orientations_quat_wxyz=traj_est[:,3:], + timestamps=np.array(tstamps)) + + traj_ref = file_interface.read_tum_trajectory_file(args.gt) + + traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est) + + result = main_ape.ape(traj_ref, traj_est, est_name='traj', + pose_relation=PoseRelation.translation_part, align=True, correct_scale=True) + + print(result) + + diff --git a/thirdparty/DROID-SLAM/evaluation_scripts/test_tum.py b/thirdparty/DROID-SLAM/evaluation_scripts/test_tum.py new file mode 100644 index 0000000000000000000000000000000000000000..6c207afc92ed7b5f66e8c14d2eb1876b505c246d --- /dev/null +++ b/thirdparty/DROID-SLAM/evaluation_scripts/test_tum.py @@ -0,0 +1,123 @@ +import sys +sys.path.append('droid_slam') + +from tqdm import tqdm +import numpy as np +import torch +import lietorch +import cv2 +import os +import glob +import time +import argparse + +import torch.nn.functional as F +from droid import Droid + + +def show_image(image): + image = image.permute(1, 2, 0).cpu().numpy() + cv2.imshow('image', image / 255.0) + cv2.waitKey(1) + +def image_stream(datapath, image_size=[320, 512]): + """ image generator """ + + fx, fy, cx, cy = 517.3, 516.5, 318.6, 255.3 + + K_l = np.array([fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]).reshape(3,3) + d_l = np.array([0.2624, -0.9531, -0.0054, 0.0026, 1.1633]) + + # read all png images in folder + images_list = sorted(glob.glob(os.path.join(datapath, 'rgb', '*.png')))[::2] + + for t, imfile in enumerate(images_list): + image = cv2.imread(imfile) + ht0, wd0, _ = image.shape + image = cv2.undistort(image, K_l, d_l) + image = cv2.resize(image, (320+32, 240+16)) + image = torch.from_numpy(image).permute(2,0,1) + + intrinsics = torch.as_tensor([fx, fy, cx, cy]).cuda() + intrinsics[0] *= image.shape[2] / 640.0 + intrinsics[1] *= image.shape[1] / 480.0 + intrinsics[2] *= image.shape[2] / 640.0 + intrinsics[3] *= image.shape[1] / 480.0 + + # crop image to remove distortion boundary + intrinsics[2] -= 16 + intrinsics[3] -= 8 + image = image[:, 8:-8, 16:-16] + + yield t, image[None], intrinsics + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--datapath") + parser.add_argument("--weights", default="droid.pth") + parser.add_argument("--buffer", type=int, default=512) + parser.add_argument("--image_size", default=[240, 320]) + parser.add_argument("--disable_vis", action="store_true") + + parser.add_argument("--beta", type=float, default=0.6) + parser.add_argument("--filter_thresh", type=float, default=1.75) + parser.add_argument("--warmup", type=int, default=12) + parser.add_argument("--keyframe_thresh", type=float, default=2.25) + parser.add_argument("--frontend_thresh", type=float, default=12.0) + parser.add_argument("--frontend_window", type=int, default=25) + parser.add_argument("--frontend_radius", type=int, default=2) + parser.add_argument("--frontend_nms", type=int, 
default=1) + + parser.add_argument("--backend_thresh", type=float, default=15.0) + parser.add_argument("--backend_radius", type=int, default=2) + parser.add_argument("--backend_nms", type=int, default=3) + args = parser.parse_args() + + args.stereo = False + torch.multiprocessing.set_start_method('spawn') + + print("Running evaluation on {}".format(args.datapath)) + print(args) + + droid = Droid(args) + time.sleep(5) + + tstamps = [] + for (t, image, intrinsics) in tqdm(image_stream(args.datapath)): + if not args.disable_vis: + show_image(image) + droid.track(t, image, intrinsics=intrinsics) + + + traj_est = droid.terminate(image_stream(args.datapath)) + + ### run evaluation ### + + print("#"*20 + " Results...") + + import evo + from evo.core.trajectory import PoseTrajectory3D + from evo.tools import file_interface + from evo.core import sync + import evo.main_ape as main_ape + from evo.core.metrics import PoseRelation + + image_path = os.path.join(args.datapath, 'rgb') + images_list = sorted(glob.glob(os.path.join(image_path, '*.png')))[::2] + tstamps = [float(x.split('/')[-1][:-4]) for x in images_list] + + traj_est = PoseTrajectory3D( + positions_xyz=traj_est[:,:3], + orientations_quat_wxyz=traj_est[:,3:], + timestamps=np.array(tstamps)) + + gt_file = os.path.join(args.datapath, 'groundtruth.txt') + traj_ref = file_interface.read_tum_trajectory_file(gt_file) + + traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est) + result = main_ape.ape(traj_ref, traj_est, est_name='traj', + pose_relation=PoseRelation.translation_part, align=True, correct_scale=True) + + + print(result) + diff --git a/thirdparty/DROID-SLAM/evaluation_scripts/validate_tartanair.py b/thirdparty/DROID-SLAM/evaluation_scripts/validate_tartanair.py new file mode 100644 index 0000000000000000000000000000000000000000..3424bdf368a2b131f2a8dc15c839e0e69482517d --- /dev/null +++ b/thirdparty/DROID-SLAM/evaluation_scripts/validate_tartanair.py @@ -0,0 +1,115 @@ +import sys +sys.path.append('droid_slam') +sys.path.append('thirdparty/tartanair_tools') + +from tqdm import tqdm +import numpy as np +import torch +import lietorch +import cv2 +import os +import glob +import time +import yaml +import argparse + +from droid import Droid + +def image_stream(datapath, image_size=[384, 512], intrinsics_vec=[320.0, 320.0, 320.0, 240.0], stereo=False): + """ image generator """ + + # read all png images in folder + ht0, wd0 = [480, 640] + images_left = sorted(glob.glob(os.path.join(datapath, 'image_left/*.png'))) + images_right = sorted(glob.glob(os.path.join(datapath, 'image_right/*.png'))) + + data = [] + for t in range(len(images_left)): + images = [ cv2.resize(cv2.imread(images_left[t]), (image_size[1], image_size[0])) ] + if stereo: + images += [ cv2.resize(cv2.imread(images_right[t]), (image_size[1], image_size[0])) ] + + images = torch.from_numpy(np.stack(images, 0)).permute(0,3,1,2) + intrinsics = .8 * torch.as_tensor(intrinsics_vec) + + data.append((t, images, intrinsics)) + + return data + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--datapath", default="datasets/TartanAir") + parser.add_argument("--weights", default="droid.pth") + parser.add_argument("--buffer", type=int, default=1000) + parser.add_argument("--image_size", default=[384,512]) + parser.add_argument("--stereo", action="store_true") + parser.add_argument("--disable_vis", action="store_true") + parser.add_argument("--plot_curve", action="store_true") + parser.add_argument("--id", type=int, default=-1) + + 
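# Note (added): --id selects a single scene from the TartanAir test split (-1 runs all)
+    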
parser.add_argument("--beta", type=float, default=0.3) + parser.add_argument("--filter_thresh", type=float, default=2.4) + parser.add_argument("--warmup", type=int, default=12) + parser.add_argument("--keyframe_thresh", type=float, default=3.5) + parser.add_argument("--frontend_thresh", type=float, default=15) + parser.add_argument("--frontend_window", type=int, default=20) + parser.add_argument("--frontend_radius", type=int, default=1) + parser.add_argument("--frontend_nms", type=int, default=1) + + parser.add_argument("--backend_thresh", type=float, default=20.0) + parser.add_argument("--backend_radius", type=int, default=2) + parser.add_argument("--backend_nms", type=int, default=3) + + args = parser.parse_args() + torch.multiprocessing.set_start_method('spawn') + + from data_readers.tartan import test_split + from evaluation.tartanair_evaluator import TartanAirEvaluator + + if not os.path.isdir("figures"): + os.mkdir("figures") + + if args.id >= 0: + test_split = [ test_split[args.id] ] + + ate_list = [] + for scene in test_split: + print("Performing evaluation on {}".format(scene)) + torch.cuda.empty_cache() + droid = Droid(args) + + scenedir = os.path.join(args.datapath, scene) + + for (tstamp, image, intrinsics) in tqdm(image_stream(scenedir, stereo=args.stereo)): + droid.track(tstamp, image, intrinsics=intrinsics) + + # fill in non-keyframe poses + global BA + traj_est = droid.terminate(image_stream(scenedir)) + + ### do evaluation ### + evaluator = TartanAirEvaluator() + gt_file = os.path.join(scenedir, "pose_left.txt") + traj_ref = np.loadtxt(gt_file, delimiter=' ')[:, [1, 2, 0, 4, 5, 3, 6]] # ned -> xyz + + # usually stereo should not be scale corrected, but we are comparing monocular and stereo here + results = evaluator.evaluate_one_trajectory( + traj_ref, traj_est, scale=True, title=scenedir[-20:].replace('/', '_')) + + print(results) + ate_list.append(results["ate_score"]) + + print("Results") + print(ate_list) + + if args.plot_curve: + import matplotlib.pyplot as plt + ate = np.array(ate_list) + xs = np.linspace(0.0, 1.0, 512) + ys = [np.count_nonzero(ate < t) / ate.shape[0] for t in xs] + + plt.plot(xs, ys) + plt.xlabel("ATE [m]") + plt.ylabel("% runs") + plt.show() + diff --git a/thirdparty/DROID-SLAM/misc/DROID.png b/thirdparty/DROID-SLAM/misc/DROID.png new file mode 100644 index 0000000000000000000000000000000000000000..79ddaedeaa512adafab8a26e24ca870c34880abf --- /dev/null +++ b/thirdparty/DROID-SLAM/misc/DROID.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99fb33606ad6ea92b4512bd843c65b5db654734d570cc5787df467df4c9d8faf +size 745238 diff --git a/thirdparty/DROID-SLAM/misc/renderoption.json b/thirdparty/DROID-SLAM/misc/renderoption.json new file mode 100644 index 0000000000000000000000000000000000000000..80d35c4b1ab67bae5197159fa3f0e1f8a72a3be8 --- /dev/null +++ b/thirdparty/DROID-SLAM/misc/renderoption.json @@ -0,0 +1,40 @@ +{ + "background_color" : [ 1, 1, 1 ], + "class_name" : "RenderOption", + "default_mesh_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ], + "image_max_depth" : 3000, + "image_stretch_option" : 0, + "interpolation_option" : 0, + "light0_color" : [ 1, 1, 1 ], + "light0_diffuse_power" : 20, + "light0_position" : [ 0, 0, 20 ], + "light0_specular_power" : 2.20000000000000001, + "light0_specular_shininess" : 100, + "light1_color" : [ 1, 1, 1 ], + "light1_diffuse_power" : 0.66000000000000003, + "light1_position" : [ 0, 0, 2 ], + "light1_specular_power" : 2.20000000000000001, + 
"light1_specular_shininess" : 100, + "light2_color" : [ 1, 1, 1 ], + "light2_diffuse_power" : 20, + "light2_position" : [ 0, 0, -20 ], + "light2_specular_power" : 2.20000000000000001, + "light2_specular_shininess" : 100, + "light3_color" : [ 1, 1, 1 ], + "light3_diffuse_power" : 20, + "light3_position" : [ 0, 0, -20 ], + "light3_specular_power" : 2.20000000000000001, + "light3_specular_shininess" : 100, + "light_ambient_color" : [ 0, 0, 0 ], + "light_on" : true, + "mesh_color_option" : 1, + "mesh_shade_option" : 0, + "mesh_show_back_face" : false, + "mesh_show_wireframe" : false, + "point_color_option" : 7, + "point_show_normal" : false, + "point_size" : 2, + "show_coordinate_frame" : false, + "version_major" : 1, + "version_minor" : 0 +} diff --git a/thirdparty/DROID-SLAM/misc/screenshot.png b/thirdparty/DROID-SLAM/misc/screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..fdac5be48846f3d4f7c31a5b3b5deff03a4834f7 --- /dev/null +++ b/thirdparty/DROID-SLAM/misc/screenshot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8bb7761f678a743bf6a5b8af137c9d624e44a7d0f1111acf602823278a0529a +size 255827 diff --git a/thirdparty/DROID-SLAM/setup.py b/thirdparty/DROID-SLAM/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..9af97d49c65bd17ce42f19acd3819ee0e8f33834 --- /dev/null +++ b/thirdparty/DROID-SLAM/setup.py @@ -0,0 +1,61 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +import os.path as osp +ROOT = osp.dirname(osp.abspath(__file__)) + +setup( + name='droid_backends', + ext_modules=[ + CUDAExtension('droid_backends', + include_dirs=[osp.join(ROOT, 'thirdparty/eigen')], + sources=[ + 'src/droid.cpp', + 'src/droid_kernels.cu', + 'src/correlation_kernels.cu', + 'src/altcorr_kernel.cu', + ], + extra_compile_args={ + 'cxx': ['-O3'], + 'nvcc': ['-O3', + '-gencode=arch=compute_60,code=sm_60', + '-gencode=arch=compute_61,code=sm_61', + '-gencode=arch=compute_70,code=sm_70', + '-gencode=arch=compute_75,code=sm_75', + '-gencode=arch=compute_80,code=sm_80', + '-gencode=arch=compute_86,code=sm_86', + ] + }), + ], + cmdclass={ 'build_ext' : BuildExtension } +) + +setup( + name='lietorch', + version='0.2', + description='Lie Groups for PyTorch', + packages=['lietorch'], + package_dir={'': 'thirdparty/lietorch'}, + ext_modules=[ + CUDAExtension('lietorch_backends', + include_dirs=[ + osp.join(ROOT, 'thirdparty/lietorch/lietorch/include'), + osp.join(ROOT, 'thirdparty/eigen')], + sources=[ + 'thirdparty/lietorch/lietorch/src/lietorch.cpp', + 'thirdparty/lietorch/lietorch/src/lietorch_gpu.cu', + 'thirdparty/lietorch/lietorch/src/lietorch_cpu.cpp'], + extra_compile_args={ + 'cxx': ['-O2'], + 'nvcc': ['-O2', + '-gencode=arch=compute_60,code=sm_60', + '-gencode=arch=compute_61,code=sm_61', + '-gencode=arch=compute_70,code=sm_70', + '-gencode=arch=compute_75,code=sm_75', + '-gencode=arch=compute_80,code=sm_80', + '-gencode=arch=compute_86,code=sm_86', + ] + }), + ], + cmdclass={ 'build_ext' : BuildExtension } +) diff --git a/thirdparty/DROID-SLAM/src/altcorr_kernel.cu b/thirdparty/DROID-SLAM/src/altcorr_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..f418f5cddd488bb16ab023f2c76cb9d09f721aef --- /dev/null +++ b/thirdparty/DROID-SLAM/src/altcorr_kernel.cu @@ -0,0 +1,356 @@ +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include + + + +#define BLOCK_H 4 +#define BLOCK_W 8 +#define BLOCK_HW BLOCK_H * 
BLOCK_W +#define CHANNEL_STRIDE 32 + + +__forceinline__ __device__ +bool within_bounds(int h, int w, int H, int W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +template +__global__ void altcorr_forward_kernel( + const torch::PackedTensorAccessor32 fmap1, + const torch::PackedTensorAccessor32 fmap2, + const torch::PackedTensorAccessor32 coords, + torch::PackedTensorAccessor32 corr, + int r) +{ + const int b = blockIdx.x; + const int h0 = blockIdx.y * blockDim.x; + const int w0 = blockIdx.z * blockDim.y; + const int tid = threadIdx.x * blockDim.y + threadIdx.y; + + const int H1 = fmap1.size(1); + const int W1 = fmap1.size(2); + const int H2 = fmap2.size(1); + const int W2 = fmap2.size(2); + const int N = coords.size(1); + const int C = fmap1.size(3); + + __shared__ scalar_t f1[CHANNEL_STRIDE][BLOCK_HW]; + __shared__ scalar_t f2[CHANNEL_STRIDE][BLOCK_HW]; + + __shared__ float x2s[BLOCK_HW]; + __shared__ float y2s[BLOCK_HW]; + + for (int c=0; c(floor(y2s[k1])) - r + iy; + int w2 = static_cast(floor(x2s[k1])) - r + ix; + int c2 = tid % CHANNEL_STRIDE; + + if (within_bounds(h2, w2, H2, W2)) + f2[c2][k1] = fmap2[b][h2][w2][c+c2]; + + else + f2[c2][k1] = static_cast(0.0); + } + + __syncthreads(); + + scalar_t s = 0.0; + for (int k=0; k((dy) * (dx)); + scalar_t ne = s * static_cast((dy) * (1-dx)); + scalar_t sw = s * static_cast((1-dy) * (dx)); + scalar_t se = s * static_cast((1-dy) * (1-dx)); + + // if (iy > 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + // corr[b][n][ix_nw][h1][w1] += nw; + + // if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + // corr[b][n][ix_ne][h1][w1] += ne; + + // if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + // corr[b][n][ix_sw][h1][w1] += sw; + + // if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + // corr[b][n][ix_se][h1][w1] += se; + + + scalar_t* corr_ptr = &corr[b][n][0][h1][w1]; + + if (iy > 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_nw) += nw; + + if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_ne) += ne; + + if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_sw) += sw; + + if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_se) += se; + + + } + } + } + } +} + + +template +__global__ void altcorr_backward_kernel( + const torch::PackedTensorAccessor32 fmap1, + const torch::PackedTensorAccessor32 fmap2, + const torch::PackedTensorAccessor32 coords, + const torch::PackedTensorAccessor32 corr_grad, + torch::PackedTensorAccessor32 fmap1_grad, + torch::PackedTensorAccessor32 fmap2_grad, + torch::PackedTensorAccessor32 coords_grad, + int r) +{ + + const int b = blockIdx.x; + const int h0 = blockIdx.y * blockDim.x; + const int w0 = blockIdx.z * blockDim.y; + const int tid = threadIdx.x * blockDim.y + threadIdx.y; + + const int H1 = fmap1.size(1); + const int W1 = fmap1.size(2); + const int H2 = fmap2.size(1); + const int W2 = fmap2.size(2); + const int N = coords.size(1); + const int C = fmap1.size(3); + + __shared__ scalar_t f1[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2[CHANNEL_STRIDE][BLOCK_HW+1]; + + __shared__ scalar_t f1_grad[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2_grad[CHANNEL_STRIDE][BLOCK_HW+1]; + + __shared__ scalar_t x2s[BLOCK_HW]; + __shared__ scalar_t y2s[BLOCK_HW]; + + for (int c=0; c(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + auto fptr = fmap2[b][h2][w2]; + if (within_bounds(h2, w2, H2, W2)) + f2[c2][k1] = fptr[c+c2]; + else + f2[c2][k1] = 0.0; + 
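+      // (added) zero the shared-memory gradient tile for this channel block;
+      // contributions are accumulated below and flushed to global memory with atomicAdd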
+ f2_grad[c2][k1] = 0.0; + } + + __syncthreads(); + + const scalar_t* grad_ptr = &corr_grad[b][n][0][h1][w1]; + scalar_t g = 0.0; + + int ix_nw = H1*W1*((iy-1) + rd*(ix-1)); + int ix_ne = H1*W1*((iy-1) + rd*ix); + int ix_sw = H1*W1*(iy + rd*(ix-1)); + int ix_se = H1*W1*(iy + rd*ix); + + if (iy > 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_nw) * dy * dx; + + if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_ne) * dy * (1-dx); + + if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_sw) * (1-dy) * dx; + + if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_se) * (1-dy) * (1-dx); + + for (int k=0; k(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + scalar_t* fptr = &fmap2_grad[b][h2][w2][0]; + if (within_bounds(h2, w2, H2, W2)) + atomicAdd(fptr+c+c2, f2_grad[c2][k1]); + } + } + } + } + __syncthreads(); + + + for (int k=0; k altcorr_cuda_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius) +{ + const auto B = coords.size(0); + const auto N = coords.size(1); + const auto H = coords.size(2); + const auto W = coords.size(3); + + const auto rd = 2 * radius + 1; + auto opts = fmap1.options(); + auto corr = torch::zeros({B, N, rd*rd, H, W}, opts); + + const dim3 blocks(B, (H+BLOCK_H-1)/BLOCK_H, (W+BLOCK_W-1)/BLOCK_W); + const dim3 threads(BLOCK_H, BLOCK_W); + + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(fmap1.type(), "altcorr_forward_kernel", ([&] { + altcorr_forward_kernel<<>>( + fmap1.packed_accessor32(), + fmap2.packed_accessor32(), + coords.packed_accessor32(), + corr.packed_accessor32(), + radius); + })); + + return {corr}; +} + +std::vector altcorr_cuda_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) +{ + const auto B = coords.size(0); + const auto N = coords.size(1); + + const auto H1 = fmap1.size(1); + const auto W1 = fmap1.size(2); + const auto H2 = fmap2.size(1); + const auto W2 = fmap2.size(2); + const auto C = fmap1.size(3); + + auto opts = fmap1.options(); + auto fmap1_grad = torch::zeros({B, H1, W1, C}, opts); + auto fmap2_grad = torch::zeros({B, H2, W2, C}, opts); + auto coords_grad = torch::zeros({B, N, H1, W1, 2}, opts); + + const dim3 blocks(B, (H1+BLOCK_H-1)/BLOCK_H, (W1+BLOCK_W-1)/BLOCK_W); + const dim3 threads(BLOCK_H, BLOCK_W); + + altcorr_backward_kernel<<>>( + fmap1.packed_accessor32(), + fmap2.packed_accessor32(), + coords.packed_accessor32(), + corr_grad.packed_accessor32(), + fmap1_grad.packed_accessor32(), + fmap2_grad.packed_accessor32(), + coords_grad.packed_accessor32(), + radius); + + return {fmap1_grad, fmap2_grad, coords_grad}; +} \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/src/correlation_kernels.cu b/thirdparty/DROID-SLAM/src/correlation_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..2812067b9d548c7ec1e25d1298e4356878afd365 --- /dev/null +++ b/thirdparty/DROID-SLAM/src/correlation_kernels.cu @@ -0,0 +1,185 @@ +#include +#include +#include +#include +#include +#include + + +#include +#include +#include + +#define BLOCK 16 + +__forceinline__ __device__ bool within_bounds(int h, int w, int H, int W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +template +__global__ void corr_index_forward_kernel( + const torch::PackedTensorAccessor32 volume, + const torch::PackedTensorAccessor32 coords, + torch::PackedTensorAccessor32 corr, + int r) +{ + // 
batch index + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + const int n = blockIdx.z; + + const int h1 = volume.size(1); + const int w1 = volume.size(2); + const int h2 = volume.size(3); + const int w2 = volume.size(4); + + if (!within_bounds(y, x, h1, w1)) { + return; + } + + float x0 = coords[n][0][y][x]; + float y0 = coords[n][1][y][x]; + + float dx = x0 - floor(x0); + float dy = y0 - floor(y0); + + int rd = 2*r + 1; + for (int i=0; i(floor(x0)) - r + i; + int y1 = static_cast(floor(y0)) - r + j; + + if (within_bounds(y1, x1, h2, w2)) { + scalar_t s = volume[n][y][x][y1][x1]; + + if (i > 0 && j > 0) + corr[n][i-1][j-1][y][x] += s * scalar_t(dx * dy); + + if (i > 0 && j < rd) + corr[n][i-1][j][y][x] += s * scalar_t(dx * (1.0f-dy)); + + if (i < rd && j > 0) + corr[n][i][j-1][y][x] += s * scalar_t((1.0f-dx) * dy); + + if (i < rd && j < rd) + corr[n][i][j][y][x] += s * scalar_t((1.0f-dx) * (1.0f-dy)); + + } + } + } +} + + +template +__global__ void corr_index_backward_kernel( + const torch::PackedTensorAccessor32 coords, + const torch::PackedTensorAccessor32 corr_grad, + torch::PackedTensorAccessor32 volume_grad, + int r) +{ + // batch index + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + const int n = blockIdx.z; + + const int h1 = volume_grad.size(1); + const int w1 = volume_grad.size(2); + const int h2 = volume_grad.size(3); + const int w2 = volume_grad.size(4); + + if (!within_bounds(y, x, h1, w1)) { + return; + } + + float x0 = coords[n][0][y][x]; + float y0 = coords[n][1][y][x]; + + float dx = x0 - floor(x0); + float dy = y0 - floor(y0); + + int rd = 2*r + 1; + for (int i=0; i(floor(x0)) - r + i; + int y1 = static_cast(floor(y0)) - r + j; + + if (within_bounds(y1, x1, h2, w2)) { + scalar_t g = 0.0; + if (i > 0 && j > 0) + g += corr_grad[n][i-1][j-1][y][x] * scalar_t(dx * dy); + + if (i > 0 && j < rd) + g += corr_grad[n][i-1][j][y][x] * scalar_t(dx * (1.0f-dy)); + + if (i < rd && j > 0) + g += corr_grad[n][i][j-1][y][x] * scalar_t((1.0f-dx) * dy); + + if (i < rd && j < rd) + g += corr_grad[n][i][j][y][x] * scalar_t((1.0f-dx) * (1.0f-dy)); + + volume_grad[n][y][x][y1][x1] += g; + } + } + } +} + +std::vector corr_index_cuda_forward( + torch::Tensor volume, + torch::Tensor coords, + int radius) +{ + const auto batch_size = volume.size(0); + const auto ht = volume.size(1); + const auto wd = volume.size(2); + + const dim3 blocks((wd + BLOCK - 1) / BLOCK, + (ht + BLOCK - 1) / BLOCK, + batch_size); + + const dim3 threads(BLOCK, BLOCK); + + auto opts = volume.options(); + torch::Tensor corr = torch::zeros( + {batch_size, 2*radius+1, 2*radius+1, ht, wd}, opts); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(volume.type(), "sampler_forward_kernel", ([&] { + corr_index_forward_kernel<<>>( + volume.packed_accessor32(), + coords.packed_accessor32(), + corr.packed_accessor32(), + radius); + })); + + return {corr}; + +} + +std::vector corr_index_cuda_backward( + torch::Tensor volume, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) +{ + const auto batch_size = volume.size(0); + const auto ht = volume.size(1); + const auto wd = volume.size(2); + + auto volume_grad = torch::zeros_like(volume); + + const dim3 blocks((wd + BLOCK - 1) / BLOCK, + (ht + BLOCK - 1) / BLOCK, + batch_size); + + const dim3 threads(BLOCK, BLOCK); + + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(volume.type(), "sampler_backward_kernel", ([&] { + corr_index_backward_kernel<<>>( + 
coords.packed_accessor32(), + corr_grad.packed_accessor32(), + volume_grad.packed_accessor32(), + radius); + })); + + return {volume_grad}; +} \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/src/droid.cpp b/thirdparty/DROID-SLAM/src/droid.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c38c1faecc783719fd0355449a82993c5fc78901 --- /dev/null +++ b/thirdparty/DROID-SLAM/src/droid.cpp @@ -0,0 +1,250 @@ +#include +#include + +// CUDA forward declarations +std::vector projective_transform_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj); + + + +torch::Tensor depth_filter_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ix, + torch::Tensor thresh); + + +torch::Tensor frame_distance_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj, + const float beta); + +std::vector projmap_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj); + +torch::Tensor iproj_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics); + +std::vector ba_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor disps_sens, + torch::Tensor targets, + torch::Tensor weights, + torch::Tensor eta, + torch::Tensor ii, + torch::Tensor jj, + const int t0, + const int t1, + const int iterations, + const float lm, + const float ep, + const bool motion_only); + +std::vector corr_index_cuda_forward( + torch::Tensor volume, + torch::Tensor coords, + int radius); + +std::vector corr_index_cuda_backward( + torch::Tensor volume, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius); + +std::vector altcorr_cuda_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius); + +std::vector altcorr_cuda_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius); + + +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) CHECK_CONTIGUOUS(x) + + +std::vector ba( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor disps_sens, + torch::Tensor targets, + torch::Tensor weights, + torch::Tensor eta, + torch::Tensor ii, + torch::Tensor jj, + const int t0, + const int t1, + const int iterations, + const float lm, + const float ep, + const bool motion_only) { + + CHECK_INPUT(targets); + CHECK_INPUT(weights); + CHECK_INPUT(poses); + CHECK_INPUT(disps); + CHECK_INPUT(intrinsics); + CHECK_INPUT(disps_sens); + CHECK_INPUT(ii); + CHECK_INPUT(jj); + + return ba_cuda(poses, disps, intrinsics, disps_sens, targets, weights, + eta, ii, jj, t0, t1, iterations, lm, ep, motion_only); + +} + + +torch::Tensor frame_distance( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj, + const float beta) { + + CHECK_INPUT(poses); + CHECK_INPUT(disps); + CHECK_INPUT(intrinsics); + CHECK_INPUT(ii); + CHECK_INPUT(jj); + + return frame_distance_cuda(poses, disps, intrinsics, ii, jj, beta); + +} + + +std::vector projmap( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj) { + + CHECK_INPUT(poses); + CHECK_INPUT(disps); + CHECK_INPUT(intrinsics); + CHECK_INPUT(ii); + CHECK_INPUT(jj); + + return projmap_cuda(poses, disps, 
intrinsics, ii, jj); + +} + + +torch::Tensor iproj( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics) { + CHECK_INPUT(poses); + CHECK_INPUT(disps); + CHECK_INPUT(intrinsics); + + return iproj_cuda(poses, disps, intrinsics); +} + + +// c++ python binding +std::vector corr_index_forward( + torch::Tensor volume, + torch::Tensor coords, + int radius) { + CHECK_INPUT(volume); + CHECK_INPUT(coords); + + return corr_index_cuda_forward(volume, coords, radius); +} + +std::vector corr_index_backward( + torch::Tensor volume, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) { + CHECK_INPUT(volume); + CHECK_INPUT(coords); + CHECK_INPUT(corr_grad); + + auto volume_grad = corr_index_cuda_backward(volume, coords, corr_grad, radius); + return {volume_grad}; +} + +std::vector altcorr_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius) { + CHECK_INPUT(fmap1); + CHECK_INPUT(fmap2); + CHECK_INPUT(coords); + + return altcorr_cuda_forward(fmap1, fmap2, coords, radius); +} + +std::vector altcorr_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) { + CHECK_INPUT(fmap1); + CHECK_INPUT(fmap2); + CHECK_INPUT(coords); + CHECK_INPUT(corr_grad); + + return altcorr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius); +} + + +torch::Tensor depth_filter( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ix, + torch::Tensor thresh) { + + CHECK_INPUT(poses); + CHECK_INPUT(disps); + CHECK_INPUT(intrinsics); + CHECK_INPUT(ix); + CHECK_INPUT(thresh); + + return depth_filter_cuda(poses, disps, intrinsics, ix, thresh); +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + // bundle adjustment kernels + m.def("ba", &ba, "bundle adjustment"); + m.def("frame_distance", &frame_distance, "frame_distance"); + m.def("projmap", &projmap, "projmap"); + m.def("depth_filter", &depth_filter, "depth_filter"); + m.def("iproj", &iproj, "back projection"); + + // correlation volume kernels + m.def("altcorr_forward", &altcorr_forward, "ALTCORR forward"); + m.def("altcorr_backward", &altcorr_backward, "ALTCORR backward"); + m.def("corr_index_forward", &corr_index_forward, "INDEX forward"); + m.def("corr_index_backward", &corr_index_backward, "INDEX backward"); +} \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/src/droid_kernels.cu b/thirdparty/DROID-SLAM/src/droid_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..16ac697f64eefdfc5d1d123af3de933822395cf9 --- /dev/null +++ b/thirdparty/DROID-SLAM/src/droid_kernels.cu @@ -0,0 +1,1541 @@ +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +// #include "utils.cuh" + +#include +#include +#include + +typedef Eigen::SparseMatrix SpMat; +typedef Eigen::Triplet T; +typedef std::vector> graph_t; +typedef std::vector tensor_list_t; + + + +#define MIN_DEPTH 0.25 + +#define THREADS 256 +#define NUM_BLOCKS(batch_size) ((batch_size + THREADS - 1) / THREADS) + + +#define GPU_1D_KERNEL_LOOP(k, n) \ + for (size_t k = threadIdx.x; k 1e-4) { + float a = (1 - cosf(theta)) / theta_sq; + crossInplace(phi, tau); + t[0] += a * tau[0]; + t[1] += a * tau[1]; + t[2] += a * tau[2]; + + float b = (theta - sinf(theta)) / (theta * theta_sq); + crossInplace(phi, tau); + t[0] += b * tau[0]; + t[1] += b * tau[1]; + t[2] += b * tau[2]; + } +} +__global__ void projective_transform_kernel( + const torch::PackedTensorAccessor32 target, + const 
torch::PackedTensorAccessor32 weight, + const torch::PackedTensorAccessor32 poses, + const torch::PackedTensorAccessor32 disps, + const torch::PackedTensorAccessor32 intrinsics, + const torch::PackedTensorAccessor32 ii, + const torch::PackedTensorAccessor32 jj, + torch::PackedTensorAccessor32 Hs, + torch::PackedTensorAccessor32 vs, + torch::PackedTensorAccessor32 Eii, + torch::PackedTensorAccessor32 Eij, + torch::PackedTensorAccessor32 Cii, + torch::PackedTensorAccessor32 bz) +{ + const int block_id = blockIdx.x; + const int thread_id = threadIdx.x; + + const int ht = disps.size(1); + const int wd = disps.size(2); + + int ix = static_cast(ii[block_id]); + int jx = static_cast(jj[block_id]); + + __shared__ float fx; + __shared__ float fy; + __shared__ float cx; + __shared__ float cy; + + __shared__ float ti[3], tj[3], tij[3]; + __shared__ float qi[4], qj[4], qij[4]; + + // load intrinsics from global memory + if (thread_id == 0) { + fx = intrinsics[0]; + fy = intrinsics[1]; + cx = intrinsics[2]; + cy = intrinsics[3]; + } + + __syncthreads(); + + // stereo frames + if (ix == jx) { + if (thread_id == 0) { + tij[0] = -0.1; + tij[1] = 0; + tij[2] = 0; + qij[0] = 0; + qij[1] = 0; + qij[2] = 0; + qij[3] = 1; + } + } + + else { + + // load poses from global memory + if (thread_id < 3) { + ti[thread_id] = poses[ix][thread_id]; + tj[thread_id] = poses[jx][thread_id]; + } + + if (thread_id < 4) { + qi[thread_id] = poses[ix][thread_id+3]; + qj[thread_id] = poses[jx][thread_id+3]; + } + + __syncthreads(); + + if (thread_id == 0) { + relSE3(ti, qi, tj, qj, tij, qij); + } + } + + __syncthreads(); + + //points + float Xi[4]; + float Xj[4]; + + // jacobians + float Jx[12]; + float Jz; + + float* Ji = &Jx[0]; + float* Jj = &Jx[6]; + + // hessians + float hij[12*(12+1)/2]; + + float vi[6], vj[6]; + + int l; + for (l=0; l<12*(12+1)/2; l++) { + hij[l] = 0; + } + + for (int n=0; n<6; n++) { + vi[n] = 0; + vj[n] = 0; + } + + __syncthreads(); + + GPU_1D_KERNEL_LOOP(k, ht*wd) { + + const int i = k / wd; + const int j = k % wd; + + const float u = static_cast(j); + const float v = static_cast(i); + + // homogenous coordinates + Xi[0] = (u - cx) / fx; + Xi[1] = (v - cy) / fy; + Xi[2] = 1; + Xi[3] = disps[ix][i][j]; + + // transform homogenous point + actSE3(tij, qij, Xi, Xj); + + const float x = Xj[0]; + const float y = Xj[1]; + const float h = Xj[3]; + + const float d = (Xj[2] < MIN_DEPTH) ? 0.0 : 1.0 / Xj[2]; + const float d2 = d * d; + + float wu = (Xj[2] < MIN_DEPTH) ? 0.0 : .001 * weight[block_id][0][i][j]; + float wv = (Xj[2] < MIN_DEPTH) ? 
0.0 : .001 * weight[block_id][1][i][j]; + const float ru = target[block_id][0][i][j] - (fx * d * x + cx); + const float rv = target[block_id][1][i][j] - (fy * d * y + cy); + + // x - coordinate + + Jj[0] = fx * (h*d); + Jj[1] = fx * 0; + Jj[2] = fx * (-x*h*d2); + Jj[3] = fx * (-x*y*d2); + Jj[4] = fx * (1 + x*x*d2); + Jj[5] = fx * (-y*d); + + Jz = fx * (tij[0] * d - tij[2] * (x * d2)); + Cii[block_id][k] = wu * Jz * Jz; + bz[block_id][k] = wu * ru * Jz; + + if (ix == jx) wu = 0; + + adjSE3(tij, qij, Jj, Ji); + for (int n=0; n<6; n++) Ji[n] *= -1; + + l=0; + for (int n=0; n<12; n++) { + for (int m=0; m<=n; m++) { + hij[l] += wu * Jx[n] * Jx[m]; + l++; + } + } + + for (int n=0; n<6; n++) { + vi[n] += wu * ru * Ji[n]; + vj[n] += wu * ru * Jj[n]; + + Eii[block_id][n][k] = wu * Jz * Ji[n]; + Eij[block_id][n][k] = wu * Jz * Jj[n]; + } + + + Jj[0] = fy * 0; + Jj[1] = fy * (h*d); + Jj[2] = fy * (-y*h*d2); + Jj[3] = fy * (-1 - y*y*d2); + Jj[4] = fy * (x*y*d2); + Jj[5] = fy * (x*d); + + Jz = fy * (tij[1] * d - tij[2] * (y * d2)); + Cii[block_id][k] += wv * Jz * Jz; + bz[block_id][k] += wv * rv * Jz; + + if (ix == jx) wv = 0; + + adjSE3(tij, qij, Jj, Ji); + for (int n=0; n<6; n++) Ji[n] *= -1; + + l=0; + for (int n=0; n<12; n++) { + for (int m=0; m<=n; m++) { + hij[l] += wv * Jx[n] * Jx[m]; + l++; + } + } + + for (int n=0; n<6; n++) { + vi[n] += wv * rv * Ji[n]; + vj[n] += wv * rv * Jj[n]; + + Eii[block_id][n][k] += wv * Jz * Ji[n]; + Eij[block_id][n][k] += wv * Jz * Jj[n]; + } + + + } + + __syncthreads(); + + __shared__ float sdata[THREADS]; + for (int n=0; n<6; n++) { + sdata[threadIdx.x] = vi[n]; + blockReduce(sdata); + if (threadIdx.x == 0) { + vs[0][block_id][n] = sdata[0]; + } + + __syncthreads(); + + sdata[threadIdx.x] = vj[n]; + blockReduce(sdata); + if (threadIdx.x == 0) { + vs[1][block_id][n] = sdata[0]; + } + + } + + l=0; + for (int n=0; n<12; n++) { + for (int m=0; m<=n; m++) { + sdata[threadIdx.x] = hij[l]; + blockReduce(sdata); + + if (threadIdx.x == 0) { + if (n<6 && m<6) { + Hs[0][block_id][n][m] = sdata[0]; + Hs[0][block_id][m][n] = sdata[0]; + } + else if (n >=6 && m<6) { + Hs[1][block_id][m][n-6] = sdata[0]; + Hs[2][block_id][n-6][m] = sdata[0]; + } + else { + Hs[3][block_id][n-6][m-6] = sdata[0]; + Hs[3][block_id][m-6][n-6] = sdata[0]; + } + } + + l++; + } + } +} + + +__global__ void projmap_kernel( + const torch::PackedTensorAccessor32 poses, + const torch::PackedTensorAccessor32 disps, + const torch::PackedTensorAccessor32 intrinsics, + const torch::PackedTensorAccessor32 ii, + const torch::PackedTensorAccessor32 jj, + torch::PackedTensorAccessor32 coords, + torch::PackedTensorAccessor32 valid) +{ + + const int block_id = blockIdx.x; + const int thread_id = threadIdx.x; + + const int ht = disps.size(1); + const int wd = disps.size(2); + + __shared__ int ix; + __shared__ int jx; + + __shared__ float fx; + __shared__ float fy; + __shared__ float cx; + __shared__ float cy; + + __shared__ float ti[3], tj[3], tij[3]; + __shared__ float qi[4], qj[4], qij[4]; + + // load intrinsics from global memory + if (thread_id == 0) { + ix = static_cast(ii[block_id]); + jx = static_cast(jj[block_id]); + fx = intrinsics[0]; + fy = intrinsics[1]; + cx = intrinsics[2]; + cy = intrinsics[3]; + } + + __syncthreads(); + + // load poses from global memory + if (thread_id < 3) { + ti[thread_id] = poses[ix][thread_id]; + tj[thread_id] = poses[jx][thread_id]; + } + + if (thread_id < 4) { + qi[thread_id] = poses[ix][thread_id+3]; + qj[thread_id] = poses[jx][thread_id+3]; + } + + __syncthreads(); + + if 
(thread_id == 0) { + relSE3(ti, qi, tj, qj, tij, qij); + } + + //points + float Xi[4]; + float Xj[4]; + + __syncthreads(); + + GPU_1D_KERNEL_LOOP(k, ht*wd) { + const int i = k / wd; + const int j = k % wd; + + const float u = static_cast(j); + const float v = static_cast(i); + + // homogenous coordinates + Xi[0] = (u - cx) / fx; + Xi[1] = (v - cy) / fy; + Xi[2] = 1; + Xi[3] = disps[ix][i][j]; + + // transform homogenous point + actSE3(tij, qij, Xi, Xj); + + coords[block_id][i][j][0] = u; + coords[block_id][i][j][1] = v; + + if (Xj[2] > 0.01) { + coords[block_id][i][j][0] = fx * (Xj[0] / Xj[2]) + cx; + coords[block_id][i][j][1] = fy * (Xj[1] / Xj[2]) + cy; + } + + valid[block_id][i][j][0] = (Xj[2] > MIN_DEPTH) ? 1.0 : 0.0; + + } +} + +__global__ void frame_distance_kernel( + const torch::PackedTensorAccessor32 poses, + const torch::PackedTensorAccessor32 disps, + const torch::PackedTensorAccessor32 intrinsics, + const torch::PackedTensorAccessor32 ii, + const torch::PackedTensorAccessor32 jj, + torch::PackedTensorAccessor32 dist, + const float beta) { + + const int block_id = blockIdx.x; + const int thread_id = threadIdx.x; + + const int ht = disps.size(1); + const int wd = disps.size(2); + + __shared__ int ix; + __shared__ int jx; + + __shared__ float fx; + __shared__ float fy; + __shared__ float cx; + __shared__ float cy; + + __shared__ float ti[3], tj[3], tij[3]; + __shared__ float qi[4], qj[4], qij[4]; + + // load intrinsics from global memory + if (thread_id == 0) { + ix = static_cast(ii[block_id]); + jx = static_cast(jj[block_id]); + fx = intrinsics[0]; + fy = intrinsics[1]; + cx = intrinsics[2]; + cy = intrinsics[3]; + } + + __syncthreads(); + + + //points + float Xi[4]; + float Xj[4]; + + __shared__ float accum[THREADS]; accum[thread_id] = 0; + __shared__ float valid[THREADS]; valid[thread_id] = 0; + __shared__ float total[THREADS]; total[thread_id] = 0; + + __syncthreads(); + + for (int n=0; n<1; n++) { + + if (thread_id < 3) { + ti[thread_id] = poses[ix][thread_id]; + tj[thread_id] = poses[jx][thread_id]; + } + + if (thread_id < 4) { + qi[thread_id] = poses[ix][thread_id+3]; + qj[thread_id] = poses[jx][thread_id+3]; + } + + __syncthreads(); + + + relSE3(ti, qi, tj, qj, tij, qij); + + float d, du, dv; + + GPU_1D_KERNEL_LOOP(k, ht*wd) { + const int i = k / wd; + const int j = k % wd; + + const float u = static_cast(j); + const float v = static_cast(i); + + + // if (disps[ix][i][j] < 0.01) { + // continue; + // } + + // homogenous coordinates + Xi[0] = (u - cx) / fx; + Xi[1] = (v - cy) / fy; + Xi[2] = 1; + Xi[3] = disps[ix][i][j]; + + // transform homogenous point + actSE3(tij, qij, Xi, Xj); + + du = fx * (Xj[0] / Xj[2]) + cx - u; + dv = fy * (Xj[1] / Xj[2]) + cy - v; + d = sqrtf(du*du + dv*dv); + + total[threadIdx.x] += beta; + + if (Xj[2] > MIN_DEPTH) { + accum[threadIdx.x] += beta * d; + valid[threadIdx.x] += beta; + } + + Xi[0] = (u - cx) / fx; + Xi[1] = (v - cy) / fy; + Xi[2] = 1; + Xi[3] = disps[ix][i][j]; + + Xj[0] = Xi[0] + Xi[3] * tij[0]; + Xj[1] = Xi[1] + Xi[3] * tij[1]; + Xj[2] = Xi[2] + Xi[3] * tij[2]; + + du = fx * (Xj[0] / Xj[2]) + cx - u; + dv = fy * (Xj[1] / Xj[2]) + cy - v; + d = sqrtf(du*du + dv*dv); + + total[threadIdx.x] += (1 - beta); + + if (Xj[2] > MIN_DEPTH) { + accum[threadIdx.x] += (1 - beta) * d; + valid[threadIdx.x] += (1 - beta); + } + } + + if (threadIdx.x == 0) { + int tmp = ix; + ix = jx; + jx = tmp; + } + + __syncthreads(); + + } + __syncthreads(); blockReduce(accum); + __syncthreads(); blockReduce(total); + __syncthreads(); blockReduce(valid); + + 
__syncthreads(); + + if (thread_id == 0) { + dist[block_id] = (valid[0] / (total[0] + 1e-8) < 0.75) ? 1000.0 : accum[0] / valid[0]; + } +} + + + +__global__ void depth_filter_kernel( + const torch::PackedTensorAccessor32 poses, + const torch::PackedTensorAccessor32 disps, + const torch::PackedTensorAccessor32 intrinsics, + const torch::PackedTensorAccessor32 inds, + const torch::PackedTensorAccessor32 thresh, + torch::PackedTensorAccessor32 counter) +{ + + const int block_id = blockIdx.x; + const int neigh_id = blockIdx.y; + const int index = blockIdx.z * blockDim.x + threadIdx.x; + + // if (threadIdx.x == 0) { + // printf("%d %d %d %d\n", blockIdx.x, blockIdx.y, blockDim.x, threadIdx.x); + // } + + const int num = disps.size(0); + const int ht = disps.size(1); + const int wd = disps.size(2); + + __shared__ int ix; + __shared__ int jx; + + __shared__ float fx; + __shared__ float fy; + __shared__ float cx; + __shared__ float cy; + + __shared__ float ti[3], tj[3], tij[3]; + __shared__ float qi[4], qj[4], qij[4]; + + if (threadIdx.x == 0) { + ix = static_cast(inds[block_id]); + jx = (neigh_id < 3) ? ix - neigh_id - 1 : ix + neigh_id; + fx = intrinsics[0]; + fy = intrinsics[1]; + cx = intrinsics[2]; + cy = intrinsics[3]; + } + + __syncthreads(); + + if (jx < 0 || jx >= num) { + return; + } + + const float t = thresh[block_id]; + + // load poses from global memory + if (threadIdx.x < 3) { + ti[threadIdx.x] = poses[ix][threadIdx.x]; + tj[threadIdx.x] = poses[jx][threadIdx.x]; + } + + if (threadIdx.x < 4) { + qi[threadIdx.x] = poses[ix][threadIdx.x+3]; + qj[threadIdx.x] = poses[jx][threadIdx.x+3]; + } + + __syncthreads(); + + if (threadIdx.x == 0) { + relSE3(ti, qi, tj, qj, tij, qij); + } + + //points + float Xi[4]; + float Xj[4]; + + __syncthreads(); + + if (index < ht*wd) { + const int i = index / wd; + const int j = index % wd; + + const float ui = static_cast(j); + const float vi = static_cast(i); + const float di = disps[ix][i][j]; + + // homogenous coordinates + Xi[0] = (ui - cx) / fx; + Xi[1] = (vi - cy) / fy; + Xi[2] = 1; + Xi[3] = di; + + // transform homogenous point + actSE3(tij, qij, Xi, Xj); + + const float uj = fx * (Xj[0] / Xj[2]) + cx; + const float vj = fy * (Xj[1] / Xj[2]) + cy; + const float dj = Xj[3] / Xj[2]; + + const int u0 = static_cast(floor(uj)); + const int v0 = static_cast(floor(vj)); + + if (u0 >= 0 && v0 >= 0 && u0 < wd-1 && v0 < ht-1) { + const float wx = ceil(uj) - uj; + const float wy = ceil(vj) - vj; + + const float d00 = disps[jx][v0+0][u0+0]; + const float d01 = disps[jx][v0+0][u0+1]; + const float d10 = disps[jx][v0+1][u0+0]; + const float d11 = disps[jx][v0+1][u0+1]; + + const float dj_hat = wy*wx*d00 + wy*(1-wx)*d01 + (1-wy)*wx*d10 + (1-wy)*(1-wx)*d11; + + const float err = abs(1.0/dj - 1.0/dj_hat); + if (abs(1.0/dj - 1.0/d00) < t) atomicAdd(&counter[block_id][i][j], 1.0f); + else if (abs(1.0/dj - 1.0/d01) < t) atomicAdd(&counter[block_id][i][j], 1.0f); + else if (abs(1.0/dj - 1.0/d10) < t) atomicAdd(&counter[block_id][i][j], 1.0f); + else if (abs(1.0/dj - 1.0/d11) < t) atomicAdd(&counter[block_id][i][j], 1.0f); + } + } +} + + + +__global__ void iproj_kernel( + const torch::PackedTensorAccessor32 poses, + const torch::PackedTensorAccessor32 disps, + const torch::PackedTensorAccessor32 intrinsics, + torch::PackedTensorAccessor32 points) + +{ + + const int block_id = blockIdx.x; + const int index = blockIdx.y * blockDim.x + threadIdx.x; + + + const int num = disps.size(0); + const int ht = disps.size(1); + const int wd = disps.size(2); + + __shared__ float fx; 
+ __shared__ float fy; + __shared__ float cx; + __shared__ float cy; + + __shared__ float t[3]; + __shared__ float q[4]; + + if (threadIdx.x == 0) { + fx = intrinsics[0]; + fy = intrinsics[1]; + cx = intrinsics[2]; + cy = intrinsics[3]; + } + + __syncthreads(); + + + // load poses from global memory + if (threadIdx.x < 3) { + t[threadIdx.x] = poses[block_id][threadIdx.x]; + } + + if (threadIdx.x < 4) { + q[threadIdx.x] = poses[block_id][threadIdx.x+3]; + } + + __syncthreads(); + + //points + float Xi[4]; + float Xj[4]; + + if (index < ht*wd) { + const int i = index / wd; + const int j = index % wd; + + const float ui = static_cast(j); + const float vi = static_cast(i); + const float di = disps[block_id][i][j]; + + // homogenous coordinates + Xi[0] = (ui - cx) / fx; + Xi[1] = (vi - cy) / fy; + Xi[2] = 1; + Xi[3] = di; + + // transform homogenous point + actSE3(t, q, Xi, Xj); + + points[block_id][i][j][0] = Xj[0] / Xj[3]; + points[block_id][i][j][1] = Xj[1] / Xj[3]; + points[block_id][i][j][2] = Xj[2] / Xj[3]; + + } +} + + + +__global__ void accum_kernel( + const torch::PackedTensorAccessor32 inps, + const torch::PackedTensorAccessor32 ptrs, + const torch::PackedTensorAccessor32 idxs, + torch::PackedTensorAccessor32 outs) +{ + + const int block_id = blockIdx.x; + const int D = inps.size(2); + + const int start = ptrs[block_id]; + const int end = ptrs[block_id+1]; + + for (int k=threadIdx.x; k poses, + const torch::PackedTensorAccessor32 dx, + const int t0, const int t1) +{ + + for (int k=t0+threadIdx.x; k disps, + const torch::PackedTensorAccessor32 dz, + const torch::PackedTensorAccessor32 inds) +{ + const int i = inds[blockIdx.x]; + const int ht = disps.size(1); + const int wd = disps.size(2); + + for (int k=threadIdx.x; k(); + long* jx_data = jx_cpu.data_ptr(); + long* kx_data = inds.data_ptr(); + + int count = jx.size(0); + std::vector cols; + + torch::Tensor ptrs_cpu = torch::zeros({count+1}, + torch::TensorOptions().dtype(torch::kInt64)); + + long* ptrs_data = ptrs_cpu.data_ptr(); + ptrs_data[0] = 0; + + int i = 0; + for (int j=0; j(); + + for (int i=0; i>>( + data.packed_accessor32(), + ptrs.packed_accessor32(), + idxs.packed_accessor32(), + out.packed_accessor32()); + + return out; +} + + +__global__ void EEt6x6_kernel( + const torch::PackedTensorAccessor32 E, + const torch::PackedTensorAccessor32 Q, + const torch::PackedTensorAccessor32 idx, + torch::PackedTensorAccessor32 S) +{ + + // indicices + const int ix = idx[blockIdx.x][0]; + const int jx = idx[blockIdx.x][1]; + const int kx = idx[blockIdx.x][2]; + + const int D = E.size(2); + + float dS[6][6]; + float ei[6]; + float ej[6]; + + for (int i=0; i<6; i++) { + for (int j=0; j<6; j++) { + dS[i][j] = 0; + } + } + + for (int k=threadIdx.x; k E, + const torch::PackedTensorAccessor32 Q, + const torch::PackedTensorAccessor32 w, + const torch::PackedTensorAccessor32 idx, + torch::PackedTensorAccessor32 v) +{ + const int D = E.size(2); + const int kx = idx[blockIdx.x][0]; + + float b[6]; + for (int n=0; n<6; n++) { + b[n] = 0.0; + } + + for (int k=threadIdx.x; k E, + const torch::PackedTensorAccessor32 x, + const torch::PackedTensorAccessor32 idx, + torch::PackedTensorAccessor32 w) +{ + + const int D = E.size(2); + const int ix = idx[blockIdx.x]; + + if (idx[blockIdx.x] <= 0 || idx[blockIdx.x] >= x.size(0)) + return; + + for (int k=threadIdx.x; k A; + Eigen::VectorX b; + + SparseBlock(int N, int M) : N(N), M(M) { + A = Eigen::SparseMatrix(N*M, N*M); + b = Eigen::VectorXd::Zero(N*M); + } + + SparseBlock(Eigen::SparseMatrix const& A, 
Eigen::VectorX const& b, + int N, int M) : A(A), b(b), N(N), M(M) {} + + void update_lhs(torch::Tensor As, torch::Tensor ii, torch::Tensor jj) { + + auto As_cpu = As.to(torch::kCPU).to(torch::kFloat64); + auto ii_cpu = ii.to(torch::kCPU).to(torch::kInt64); + auto jj_cpu = jj.to(torch::kCPU).to(torch::kInt64); + + auto As_acc = As_cpu.accessor(); + auto ii_acc = ii_cpu.accessor(); + auto jj_acc = jj_cpu.accessor(); + + std::vector tripletList; + for (int n=0; n= 0 && j >= 0) { + for (int k=0; k(); + auto ii_acc = ii_cpu.accessor(); + + for (int n=0; n= 0) { + for (int j=0; j get_dense() { + Eigen::MatrixXd Ad = Eigen::MatrixXd(A); + + torch::Tensor H = torch::from_blob(Ad.data(), {N*M, N*M}, torch::TensorOptions() + .dtype(torch::kFloat64)).to(torch::kCUDA).to(torch::kFloat32); + + torch::Tensor v = torch::from_blob(b.data(), {N*M, 1}, torch::TensorOptions() + .dtype(torch::kFloat64)).to(torch::kCUDA).to(torch::kFloat32); + + return std::make_tuple(H, v); + + } + + torch::Tensor solve(const float lm=0.0001, const float ep=0.1) { + + torch::Tensor dx; + + Eigen::SparseMatrix L(A); + L.diagonal().array() += ep + lm * L.diagonal().array(); + + Eigen::SimplicialLLT> solver; + solver.compute(L); + + if (solver.info() == Eigen::Success) { + Eigen::VectorXd x = solver.solve(b); + dx = torch::from_blob(x.data(), {N, M}, torch::TensorOptions() + .dtype(torch::kFloat64)).to(torch::kCUDA).to(torch::kFloat32); + } + else { + dx = torch::zeros({N, M}, torch::TensorOptions() + .device(torch::kCUDA).dtype(torch::kFloat32)); + } + + return dx; + } + + private: + const int N; + const int M; + +}; + + +SparseBlock schur_block(torch::Tensor E, + torch::Tensor Q, + torch::Tensor w, + torch::Tensor ii, + torch::Tensor jj, + torch::Tensor kk, + const int t0, + const int t1) +{ + + torch::Tensor ii_cpu = ii.to(torch::kCPU); + torch::Tensor jj_cpu = jj.to(torch::kCPU); + torch::Tensor kk_cpu = kk.to(torch::kCPU); + + const int P = t1 - t0; + const long* ii_data = ii_cpu.data_ptr(); + const long* jj_data = jj_cpu.data_ptr(); + const long* kk_data = kk_cpu.data_ptr(); + + std::vector> graph(P); + std::vector> index(P); + + for (int n=0; n= t0 && j <= t1) { + const int t = j - t0; + graph[t].push_back(k); + index[t].push_back(n); + } + } + + std::vector ii_list, jj_list, idx, jdx; + + for (int i=0; i>>( + E.packed_accessor32(), + Q.packed_accessor32(), + ix_cuda.packed_accessor32(), + S.packed_accessor32()); + + Ev6x1_kernel<<>>( + E.packed_accessor32(), + Q.packed_accessor32(), + w.packed_accessor32(), + jx_cuda.packed_accessor32(), + v.packed_accessor32()); + + // schur block + SparseBlock A(P, 6); + A.update_lhs(S, ii2_cpu, jj2_cpu); + A.update_rhs(v, jj_cpu - t0); + + return A; +} + + +std::vector ba_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor disps_sens, + torch::Tensor targets, + torch::Tensor weights, + torch::Tensor eta, + torch::Tensor ii, + torch::Tensor jj, + const int t0, + const int t1, + const int iterations, + const float lm, + const float ep, + const bool motion_only) +{ + auto opts = poses.options(); + const int num = ii.size(0); + const int ht = disps.size(1); + const int wd = disps.size(2); + + torch::Tensor ts = torch::arange(t0, t1).to(torch::kCUDA); + torch::Tensor ii_exp = torch::cat({ts, ii}, 0); + torch::Tensor jj_exp = torch::cat({ts, jj}, 0); + + std::tuple kuniq = + torch::_unique(ii_exp, true, true); + + torch::Tensor kx = std::get<0>(kuniq); + torch::Tensor kk_exp = std::get<1>(kuniq); + + torch::Tensor dx; + torch::Tensor dz; + + // 
initialize buffers + torch::Tensor Hs = torch::zeros({4, num, 6, 6}, opts); + torch::Tensor vs = torch::zeros({2, num, 6}, opts); + torch::Tensor Eii = torch::zeros({num, 6, ht*wd}, opts); + torch::Tensor Eij = torch::zeros({num, 6, ht*wd}, opts); + torch::Tensor Cii = torch::zeros({num, ht*wd}, opts); + torch::Tensor wi = torch::zeros({num, ht*wd}, opts); + + for (int itr=0; itr>>( + targets.packed_accessor32(), + weights.packed_accessor32(), + poses.packed_accessor32(), + disps.packed_accessor32(), + intrinsics.packed_accessor32(), + ii.packed_accessor32(), + jj.packed_accessor32(), + Hs.packed_accessor32(), + vs.packed_accessor32(), + Eii.packed_accessor32(), + Eij.packed_accessor32(), + Cii.packed_accessor32(), + wi.packed_accessor32()); + + + // pose x pose block + SparseBlock A(t1 - t0, 6); + + A.update_lhs(Hs.reshape({-1, 6, 6}), + torch::cat({ii, ii, jj, jj}) - t0, + torch::cat({ii, jj, ii, jj}) - t0); + + A.update_rhs(vs.reshape({-1, 6}), + torch::cat({ii, jj}) - t0); + + if (motion_only) { + dx = A.solve(lm, ep); + + // update poses + pose_retr_kernel<<<1, THREADS>>>( + poses.packed_accessor32(), + dx.packed_accessor32(), t0, t1); + } + + else { + // add depth residual if there are depth sensor measurements + const float alpha = 0.05; + torch::Tensor m = (disps_sens.index({kx, "..."}) > 0).to(torch::TensorOptions().dtype(torch::kFloat32)).view({-1, ht*wd}); + torch::Tensor C = accum_cuda(Cii, ii, kx) + m * alpha + (1 - m) * eta.view({-1, ht*wd}); + torch::Tensor w = accum_cuda(wi, ii, kx) - m * alpha * (disps.index({kx, "..."}) - disps_sens.index({kx, "..."})).view({-1, ht*wd}); + torch::Tensor Q = 1.0 / C; + + torch::Tensor Ei = accum_cuda(Eii.view({num, 6*ht*wd}), ii, ts).view({t1-t0, 6, ht*wd}); + torch::Tensor E = torch::cat({Ei, Eij}, 0); + + SparseBlock S = schur_block(E, Q, w, ii_exp, jj_exp, kk_exp, t0, t1); + dx = (A - S).solve(lm, ep); + + torch::Tensor ix = jj_exp - t0; + torch::Tensor dw = torch::zeros({ix.size(0), ht*wd}, opts); + + EvT6x1_kernel<<>>( + E.packed_accessor32(), + dx.packed_accessor32(), + ix.packed_accessor32(), + dw.packed_accessor32()); + + dz = Q * (w - accum_cuda(dw, ii_exp, kx)); + + // update poses + pose_retr_kernel<<<1, THREADS>>>( + poses.packed_accessor32(), + dx.packed_accessor32(), t0, t1); + + // update disparity maps + disp_retr_kernel<<>>( + disps.packed_accessor32(), + dz.packed_accessor32(), + kx.packed_accessor32()); + } + + } + + return {dx, dz}; +} + + + +torch::Tensor frame_distance_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj, + const float beta) +{ + auto opts = poses.options(); + const int num = ii.size(0); + + torch::Tensor dist = torch::zeros({num}, opts); + + frame_distance_kernel<<>>( + poses.packed_accessor32(), + disps.packed_accessor32(), + intrinsics.packed_accessor32(), + ii.packed_accessor32(), + jj.packed_accessor32(), + dist.packed_accessor32(), beta); + + return dist; +} + + +std::vector projmap_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ii, + torch::Tensor jj) +{ + auto opts = poses.options(); + const int num = ii.size(0); + const int ht = disps.size(1); + const int wd = disps.size(2); + + torch::Tensor coords = torch::zeros({num, ht, wd, 3}, opts); + torch::Tensor valid = torch::zeros({num, ht, wd, 1}, opts); + + projmap_kernel<<>>( + poses.packed_accessor32(), + disps.packed_accessor32(), + intrinsics.packed_accessor32(), + ii.packed_accessor32(), + jj.packed_accessor32(), + 
coords.packed_accessor32(), + valid.packed_accessor32()); + + return {coords, valid}; +} + + +torch::Tensor depth_filter_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics, + torch::Tensor ix, + torch::Tensor thresh) +{ + const int num = ix.size(0); + const int ht = disps.size(1); + const int wd = disps.size(2); + + torch::Tensor counter = torch::zeros({num, ht, wd}, disps.options()); + + dim3 blocks(num, 6, NUM_BLOCKS(ht * wd)); + + depth_filter_kernel<<>>( + poses.packed_accessor32(), + disps.packed_accessor32(), + intrinsics.packed_accessor32(), + ix.packed_accessor32(), + thresh.packed_accessor32(), + counter.packed_accessor32()); + + return counter; +} + + +torch::Tensor iproj_cuda( + torch::Tensor poses, + torch::Tensor disps, + torch::Tensor intrinsics) +{ + + const int nm = disps.size(0); + const int ht = disps.size(1); + const int wd = disps.size(2); + + auto opts = disps.options(); + torch::Tensor points = torch::zeros({nm, ht, wd, 3}, opts); + + dim3 blocks(nm, NUM_BLOCKS(ht * wd)); + + iproj_kernel<<>>( + poses.packed_accessor32(), + disps.packed_accessor32(), + intrinsics.packed_accessor32(), + points.packed_accessor32()); + + return points; + +} diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.clang-format b/thirdparty/DROID-SLAM/thirdparty/eigen/.clang-format new file mode 100644 index 0000000000000000000000000000000000000000..1f33cbed2e4486c6e783d5ff032b3b9edf6ba175 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.clang-format @@ -0,0 +1,19 @@ +--- +BasedOnStyle: Google +ColumnLimit: 120 +--- +Language: Cpp +BasedOnStyle: Google +ColumnLimit: 120 +StatementMacros: + - EIGEN_STATIC_ASSERT + - EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + - EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN +SortIncludes: false +AttributeMacros: +- EIGEN_STRONG_INLINE +- EIGEN_ALWAYS_INLINE +- EIGEN_DEVICE_FUNC +- EIGEN_DONT_INLINE +- EIGEN_DEPRECATED +- EIGEN_UNUSED diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.git-blame-ignore-revs b/thirdparty/DROID-SLAM/thirdparty/eigen/.git-blame-ignore-revs new file mode 100644 index 0000000000000000000000000000000000000000..e89d7d7b08fa3483f5f9d13e683c91b78734f68f --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.git-blame-ignore-revs @@ -0,0 +1,4 @@ +# First major clang-format MR (https://gitlab.com/libeigen/eigen/-/merge_requests/1429). +f38e16c193d489c278c189bc06b448a94adb45fb +# Formatting of tests, examples, benchmarks, et cetera (https://gitlab.com/libeigen/eigen/-/merge_requests/1432). 
+46e9cdb7fea25d7f7aef4332b9c3ead3857e213d diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.gitignore b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..369ae25b626a173c59eae665d4bb54a4f24d1410 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitignore @@ -0,0 +1,41 @@ +qrc_*cxx +*.orig +*.pyc +*.diff +diff +*.save +save +*.old +*.gmo +*.qm +core +core.* +*.bak +*~ +*.build* +*.moc.* +*.moc +ui_* +CMakeCache.txt +tags +.*.swp +activity.png +*.out +*.php* +*.log +*.orig +*.rej +log +patch +*.patch +a +a.* +lapack/testing +lapack/reference +.*project +.settings +Makefile +!ci/build.gitlab-ci.yml +!scripts/buildtests.in +!Eigen/Core +!Eigen/src/Core diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab-ci.yml b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..93bfc2cf0a282c95660c05b5141a2b40cbc93927 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab-ci.yml @@ -0,0 +1,33 @@ +# This file is part of Eigen, a lightweight C++ template library +# for linear algebra. +# +# Copyright (C) 2023, The Eigen Authors +# +# This Source Code Form is subject to the terms of the Mozilla +# Public License v. 2.0. If a copy of the MPL was not distributed +# with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +stages: + - checkformat + - build + - test + - deploy + +variables: + # CMake build directory. + EIGEN_CI_BUILDDIR: .build + # Specify the CMake build target. + EIGEN_CI_BUILD_TARGET: "" + # If a test regex is specified, that will be selected. + # Otherwise, we will try a label if specified. + EIGEN_CI_TEST_REGEX: "" + EIGEN_CI_TEST_LABEL: "" + +include: + - "/ci/checkformat.gitlab-ci.yml" + - "/ci/common.gitlab-ci.yml" + - "/ci/build.linux.gitlab-ci.yml" + - "/ci/build.windows.gitlab-ci.yml" + - "/ci/test.linux.gitlab-ci.yml" + - "/ci/test.windows.gitlab-ci.yml" + - "/ci/deploy.gitlab-ci.yml" diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/issue_templates/Bug Report.md b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/issue_templates/Bug Report.md new file mode 100644 index 0000000000000000000000000000000000000000..0c49b0fe38d4a21c32fb4da83d073e992a0fa6a9 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/issue_templates/Bug Report.md @@ -0,0 +1,69 @@ + + +### Summary + + +### Environment + +- **Operating System** : Windows/Linux +- **Architecture** : x64/Arm64/PowerPC ... +- **Eigen Version** : 3.3.9 +- **Compiler Version** : Gcc7.0 +- **Compile Flags** : -O3 -march=native +- **Vector Extension** : SSE/AVX/NEON ... + +### Minimal Example + + +```cpp +//show your code here +``` + +### Steps to reproduce + + +1. first step +2. second step +3. ... + +### What is the current *bug* behavior? + + +### What is the expected *correct* behavior? + + +### Relevant logs + + + +### Warning Messages + + + +### Benchmark scripts and results + + +### Anything else that might help + + +- [ ] Have a plan to fix this issue. diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/issue_templates/Feature Request.md b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/issue_templates/Feature Request.md new file mode 100644 index 0000000000000000000000000000000000000000..2c6f908af4f9ba0c056d980c89cbe1f6414db295 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/issue_templates/Feature Request.md @@ -0,0 +1,7 @@ +### Describe the feature you would like to be implemented. 
+ +### Would such a feature be useful for other users? Why? + +### Any hints on how to implement the requested feature? + +### Additional resources diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/merge_request_templates/Merge Request Template.md b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/merge_request_templates/Merge Request Template.md new file mode 100644 index 0000000000000000000000000000000000000000..3fe963afa104b0c70bc9fcbe9e7824c8c839a99f --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.gitlab/merge_request_templates/Merge Request Template.md @@ -0,0 +1,26 @@ + + +### Reference issue + + +### What does this implement/fix? + + +### Additional information + diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/.hgeol b/thirdparty/DROID-SLAM/thirdparty/eigen/.hgeol new file mode 100644 index 0000000000000000000000000000000000000000..5327df161536cc2b12f8884e9935daaca82690b1 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/.hgeol @@ -0,0 +1,11 @@ +[patterns] +*.sh = LF +*.MINPACK = CRLF +scripts/*.in = LF +debug/msvc/*.dat = CRLF +debug/msvc/*.natvis = CRLF +unsupported/test/mpreal/*.* = CRLF +** = native + +[repository] +native = LF diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/CMakeLists.txt b/thirdparty/DROID-SLAM/thirdparty/eigen/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..116849c8f0b2f09ba6c68d20cffd80960df57f59 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/CMakeLists.txt @@ -0,0 +1,773 @@ +cmake_minimum_required(VERSION 3.10.0) + +#============================================================================== +# CMake Policy issues. +#============================================================================== +# Allow overriding options in a parent project via `set` before including Eigen. +if (POLICY CMP0077) + cmake_policy (SET CMP0077 NEW) +endif (POLICY CMP0077) + +# NOTE Remove setting the policy once the minimum required CMake version is +# increased to at least 3.15. Retain enabling the export to package registry. +if (POLICY CMP0090) + # The export command does not populate package registry by default + cmake_policy (SET CMP0090 NEW) + # Unless otherwise specified, always export to package registry to ensure + # backwards compatibility. + if (NOT DEFINED CMAKE_EXPORT_PACKAGE_REGISTRY) + set (CMAKE_EXPORT_PACKAGE_REGISTRY ON) + endif (NOT DEFINED CMAKE_EXPORT_PACKAGE_REGISTRY) +endif (POLICY CMP0090) + +# Disable warning about find_package(CUDA). +# CUDA language support is lacking for clang as the CUDA compiler +# until at least cmake version 3.18. Even then, there seems to be +# issues on Windows+Ninja in passing build flags. Continue using +# the "old" way for now. +if (POLICY CMP0146) + cmake_policy(SET CMP0146 OLD) +endif () + +#============================================================================== +# CMake Project. +#============================================================================== + +project(Eigen3) + +# Remove this block after bumping CMake to v3.21.0 +# PROJECT_IS_TOP_LEVEL is defined then by default +if(CMAKE_VERSION VERSION_LESS 3.21.0) + if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + set(PROJECT_IS_TOP_LEVEL ON) + else() + set(PROJECT_IS_TOP_LEVEL OFF) + endif() +endif() + +#============================================================================== +# Build ON/OFF Settings. +#============================================================================== +# Determine if we should build tests. 
+include(CMakeDependentOption) +cmake_dependent_option(BUILD_TESTING "Enable creation of tests." ON "PROJECT_IS_TOP_LEVEL" OFF) +option(EIGEN_BUILD_TESTING "Enable creation of Eigen tests." ${BUILD_TESTING}) +option(EIGEN_LEAVE_TEST_IN_ALL_TARGET "Leaves tests in the all target, needed by ctest for automatic building." OFF) + +# Determine if we should build BLAS/LAPACK implementations. +option(EIGEN_BUILD_BLAS "Toggles the building of the Eigen Blas library" ${PROJECT_IS_TOP_LEVEL}) +option(EIGEN_BUILD_LAPACK "Toggles the building of the included Eigen LAPACK library" ${PROJECT_IS_TOP_LEVEL}) +if (EIGEN_BUILD_BLAS OR EIGEN_BUILD_LAPACK) + # BLAS and LAPACK currently need a fortran compiler. + include(CMakeDetermineFortranCompiler) + if (NOT CMAKE_Fortran_COMPILER) + set(EIGEN_BUILD_BLAS OFF) + set(EIGEN_BUILD_LAPACK OFF) + else() + # Determine if we should build shared libraries for BLAS/LAPACK on this platform. + get_cmake_property(EIGEN_BUILD_SHARED_LIBS TARGET_SUPPORTS_SHARED_LIBS) + endif() +endif() + +option(EIGEN_BUILD_BTL "Build benchmark suite" OFF) +option(EIGEN_BUILD_SPBENCH "Build sparse benchmark suite" OFF) +# Avoid building docs if included from another project. +# Building documentation requires creating and running executables on the host +# platform. We shouldn't do this if cross-compiling. +if (PROJECT_IS_TOP_LEVEL AND NOT CMAKE_CROSSCOMPILING) + set(EIGEN_BUILD_DOC_DEFAULT ON) +endif() +option(EIGEN_BUILD_DOC "Enable creation of Eigen documentation" ${EIGEN_BUILD_DOC_DEFAULT}) + +option(EIGEN_BUILD_DEMOS "Toggles the building of the Eigen demos" ${PROJECT_IS_TOP_LEVEL}) + +# Disable pkgconfig only for native Windows builds +if(NOT WIN32 OR NOT CMAKE_HOST_SYSTEM_NAME MATCHES Windows) + option(EIGEN_BUILD_PKGCONFIG "Build pkg-config .pc file for Eigen" ${PROJECT_IS_TOP_LEVEL}) +endif() +option(EIGEN_BUILD_CMAKE_PACKAGE "Enables the creation of EigenConfig.cmake and related files" ${PROJECT_IS_TOP_LEVEL}) + +if (EIGEN_BUILD_TESTING OR EIGEN_BUILD_BLAS OR EIGEN_BUILD_LAPACK OR EIGEN_BUILT_BTL OR EIGEN_BUILD_BTL OR EIGEN_BUILD_SPBENCH OR EIGEN_BUILD_DOC OR EIGEN_BUILD_DEMOS) + set(EIGEN_IS_BUILDING_ ON) +endif() + +#============================================================================== +# Version Info. +#============================================================================== + +# Automatically parse the version number from header files. +file(READ "${PROJECT_SOURCE_DIR}/Eigen/src/Core/util/Macros.h" _eigen_version_header) +string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen_world_version_match "${_eigen_version_header}") +set(EIGEN_WORLD_VERSION "${CMAKE_MATCH_1}") +string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen_major_version_match "${_eigen_version_header}") +set(EIGEN_MAJOR_VERSION "${CMAKE_MATCH_1}") +string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen_minor_version_match "${_eigen_version_header}") +set(EIGEN_MINOR_VERSION "${CMAKE_MATCH_1}") +set(EIGEN_VERSION_NUMBER ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION}) + +# If we are in a git repo, extract a changeset. +if(IS_DIRECTORY ${CMAKE_SOURCE_DIR}/.git) + # if the git program is absent or this will leave the EIGEN_GIT_REVNUM string empty, + # but won't stop CMake. + execute_process(COMMAND git ls-remote --refs -q ${CMAKE_SOURCE_DIR} HEAD OUTPUT_VARIABLE EIGEN_GIT_OUTPUT) +endif() +# extract the git rev number from the git output... 
+if(EIGEN_GIT_OUTPUT) +string(REGEX MATCH "^([0-9;a-f]+).*" EIGEN_GIT_CHANGESET_MATCH "${EIGEN_GIT_OUTPUT}") +set(EIGEN_GIT_REVNUM "${CMAKE_MATCH_1}") +endif() +#...and show it next to the version number +if(EIGEN_GIT_REVNUM) + set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (git rev ${EIGEN_GIT_REVNUM})") +else() + set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER}") +endif() + +#============================================================================== +# Install Path Configuration. +#============================================================================== + +# Unconditionally allow install of targets to support nested dependency +# installations. +# +# Note: projects that depend on Eigen should _probably_ exclude installing +# Eigen by default (e.g. by using EXCLUDE_FROM_ALL when using +# FetchContent_Declare or add_subdirectory) to avoid overwriting a previous +# installation. + +include(GNUInstallDirs) +# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR +if(EIGEN_INCLUDE_INSTALL_DIR) + message(WARNING "EIGEN_INCLUDE_INSTALL_DIR is deprecated. Use INCLUDE_INSTALL_DIR instead.") +endif() + +if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR) + set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR} + CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where Eigen header files are installed") +else() + set(INCLUDE_INSTALL_DIR + "${CMAKE_INSTALL_INCLUDEDIR}/eigen3" + CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where Eigen header files are installed" + ) +endif() +set(CMAKEPACKAGE_INSTALL_DIR + "${CMAKE_INSTALL_DATADIR}/eigen3/cmake" + CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where Eigen3Config.cmake is installed" + ) +set(PKGCONFIG_INSTALL_DIR + "${CMAKE_INSTALL_DATADIR}/pkgconfig" + CACHE PATH "The directory relative to CMAKE_INSTALL_PREFIX where eigen3.pc is installed" + ) + +foreach(var INCLUDE_INSTALL_DIR CMAKEPACKAGE_INSTALL_DIR PKGCONFIG_INSTALL_DIR) + # If an absolute path is specified, make it relative to "{CMAKE_INSTALL_PREFIX}". + if(IS_ABSOLUTE "${${var}}") + file(RELATIVE_PATH "${var}" "${CMAKE_INSTALL_PREFIX}" "${${var}}") + endif() +endforeach() + +#============================================================================== +# Eigen Library. +#============================================================================== + +set ( EIGEN_VERSION_STRING ${EIGEN_VERSION_NUMBER} ) +set ( EIGEN_VERSION_MAJOR ${EIGEN_WORLD_VERSION} ) +set ( EIGEN_VERSION_MINOR ${EIGEN_MAJOR_VERSION} ) +set ( EIGEN_VERSION_PATCH ${EIGEN_MINOR_VERSION} ) + +# Alias Eigen_*_DIR to Eigen3_*_DIR: +set(Eigen_SOURCE_DIR ${Eigen3_SOURCE_DIR}) +set(Eigen_BINARY_DIR ${Eigen3_BINARY_DIR}) + +# Imported target support +add_library (eigen INTERFACE) +add_library (Eigen3::Eigen ALIAS eigen) +target_include_directories (eigen INTERFACE + $ + $ +) + +# Export as title case Eigen +set_target_properties (eigen PROPERTIES EXPORT_NAME Eigen) + +#============================================================================== +# Install Rule Configuration. 
+#============================================================================== + +install(FILES + signature_of_eigen3_matrix_library + DESTINATION ${INCLUDE_INSTALL_DIR} COMPONENT Devel + ) + +if(EIGEN_BUILD_PKGCONFIG) + configure_file(eigen3.pc.in eigen3.pc @ONLY) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eigen3.pc + DESTINATION ${PKGCONFIG_INSTALL_DIR}) +endif() + +install(DIRECTORY Eigen DESTINATION ${INCLUDE_INSTALL_DIR} COMPONENT Devel) + +install(TARGETS eigen EXPORT Eigen3Targets) + +if(EIGEN_BUILD_CMAKE_PACKAGE) + include (CMakePackageConfigHelpers) + configure_package_config_file ( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3Config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake + INSTALL_DESTINATION ${CMAKEPACKAGE_INSTALL_DIR} + NO_SET_AND_CHECK_MACRO # Eigen does not provide legacy style defines + NO_CHECK_REQUIRED_COMPONENTS_MACRO # Eigen does not provide components + ) + + # NOTE Remove the first code path once the minimum required CMake version is + # bumped to 3.14 or above. + if (CMAKE_VERSION VERSION_LESS 3.14) + # Remove CMAKE_SIZEOF_VOID_P from Eigen3ConfigVersion.cmake since Eigen does + # not depend on architecture specific settings or libraries. More + # specifically, an Eigen3Config.cmake generated from a 64 bit target can be + # used for 32 bit targets as well (and vice versa). + set (_Eigen3_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P}) + unset (CMAKE_SIZEOF_VOID_P) + write_basic_package_version_file (Eigen3ConfigVersion.cmake + VERSION ${EIGEN_VERSION_NUMBER} + COMPATIBILITY SameMajorVersion) + set (CMAKE_SIZEOF_VOID_P ${_Eigen3_CMAKE_SIZEOF_VOID_P}) + else (CMAKE_VERSION VERSION_LESS 3.14) + write_basic_package_version_file (Eigen3ConfigVersion.cmake + VERSION ${EIGEN_VERSION_NUMBER} + COMPATIBILITY SameMajorVersion + ARCH_INDEPENDENT) + endif (CMAKE_VERSION VERSION_LESS 3.14) + + # The Eigen target will be located in the Eigen3 namespace. Other CMake + # targets can refer to it using Eigen3::Eigen. + export (TARGETS eigen NAMESPACE Eigen3:: FILE Eigen3Targets.cmake) + # Export Eigen3 package to CMake registry such that it can be easily found by + # CMake even if it has not been installed to a standard directory. + export (PACKAGE Eigen3) + + install (EXPORT Eigen3Targets NAMESPACE Eigen3:: DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}) + + install (FILES ${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/Eigen3ConfigVersion.cmake + DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}) + + # Add uninstall target + if(NOT TARGET uninstall) + add_custom_target ( uninstall + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/EigenUninstall.cmake) + endif() +endif() + +#============================================================================== +# General Build Configuration. +#============================================================================== + +# Avoid setting the standard in a parent if unset. +if(PROJECT_IS_TOP_LEVEL) + set(CMAKE_CXX_STANDARD 14 CACHE STRING "Default C++ standard") + set(CMAKE_CXX_STANDARD_REQUIRED ON CACHE BOOL "Require C++ standard") + set(CMAKE_CXX_EXTENSIONS OFF CACHE BOOL "Allow C++ extensions") +endif() + +# Guard against in-source builds +if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR}) + message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. 
") +endif() + +# Guard against bad build-type strings +if (PROJECT_IS_TOP_LEVEL AND NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") +endif() + +# Only try to figure out how to link the math library if we are building something. +# Otherwise, let the parent project deal with dependencies. +if (EIGEN_IS_BUILDING_) + # Use Eigen's cmake files. + set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) + + set(CMAKE_INCLUDE_CURRENT_DIR OFF) + + find_package(StandardMathLibrary) + set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "") + if(NOT STANDARD_MATH_LIBRARY_FOUND) + message(FATAL_ERROR + "Can't link to the standard math library. Please report to the Eigen developers, telling them about your platform.") + else() + if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) + set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${STANDARD_MATH_LIBRARY}") + else() + set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "${STANDARD_MATH_LIBRARY}") + endif() + endif() + if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) + message(STATUS "Standard libraries to link to explicitly: ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}") + else() + message(STATUS "Standard libraries to link to explicitly: none") + endif() + + # Default tests/examples/libraries to row-major. + option(EIGEN_DEFAULT_TO_ROW_MAJOR "Use row-major as default matrix storage order" OFF) + if(EIGEN_DEFAULT_TO_ROW_MAJOR) + add_definitions("-DEIGEN_DEFAULT_TO_ROW_MAJOR") + endif() +endif() + +#============================================================================== +# Test Configuration. +#============================================================================== + +if (EIGEN_BUILD_TESTING) + function(ei_maybe_separate_arguments variable mode args) + # Use separate_arguments if the input is a single string containing a space. + # Otherwise, if it is already a list or doesn't have a space, just propagate + # the original value. This is to better support multi-argument lists. + list(LENGTH args list_length) + if (${list_length} EQUAL 1) + string(FIND "${args}" " " has_space) + if (${has_space} GREATER -1) + separate_arguments(args ${mode} "${args}") + endif() + endif() + set(${variable} ${args} PARENT_SCOPE) + endfunction(ei_maybe_separate_arguments) + + include(CheckCXXCompilerFlag) + macro(ei_add_cxx_compiler_flag FLAG) + string(REGEX REPLACE "-" "" SFLAG1 ${FLAG}) + string(REGEX REPLACE "\\+" "p" SFLAG ${SFLAG1}) + check_cxx_compiler_flag(${FLAG} COMPILER_SUPPORT_${SFLAG}) + if(COMPILER_SUPPORT_${SFLAG}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}") + endif() + endmacro() + + set(EIGEN_TEST_CUSTOM_LINKER_FLAGS "" CACHE STRING "Additional linker flags when linking unit tests.") + set(EIGEN_TEST_CUSTOM_CXX_FLAGS "" CACHE STRING "Additional compiler flags when compiling unit tests.") + # Convert space-separated arguments into CMake lists for downstream consumption. + ei_maybe_separate_arguments(EIGEN_TEST_CUSTOM_LINKER_FLAGS NATIVE_COMMAND "${EIGEN_TEST_CUSTOM_LINKER_FLAGS}") + ei_maybe_separate_arguments(EIGEN_TEST_CUSTOM_CXX_FLAGS NATIVE_COMMAND "${EIGEN_TEST_CUSTOM_CXX_FLAGS}") + + option(EIGEN_SPLIT_LARGE_TESTS "Split large tests into smaller executables" ON) + set(EIGEN_TEST_MAX_SIZE "320" CACHE STRING "Maximal matrix/vector size, default is 320") + + # Flags for tests. 
+ if(NOT MSVC) + # We assume that other compilers are partly compatible with GNUCC + + # clang outputs some warnings for unknown flags that are not caught by check_cxx_compiler_flag + # adding -Werror turns such warnings into errors + check_cxx_compiler_flag("-Werror" COMPILER_SUPPORT_WERROR) + if(COMPILER_SUPPORT_WERROR) + set(CMAKE_REQUIRED_FLAGS "-Werror") + endif() + ei_add_cxx_compiler_flag("-pedantic") + ei_add_cxx_compiler_flag("-Wall") + ei_add_cxx_compiler_flag("-Wextra") + # ei_add_cxx_compiler_flag("-Weverything") # clang + ei_add_cxx_compiler_flag("-Wundef") + ei_add_cxx_compiler_flag("-Wcast-align") + ei_add_cxx_compiler_flag("-Wchar-subscripts") + ei_add_cxx_compiler_flag("-Wnon-virtual-dtor") + ei_add_cxx_compiler_flag("-Wunused-local-typedefs") + ei_add_cxx_compiler_flag("-Wpointer-arith") + ei_add_cxx_compiler_flag("-Wwrite-strings") + ei_add_cxx_compiler_flag("-Wformat-security") + ei_add_cxx_compiler_flag("-Wshorten-64-to-32") + ei_add_cxx_compiler_flag("-Wlogical-op") + ei_add_cxx_compiler_flag("-Wenum-conversion") + ei_add_cxx_compiler_flag("-Wc++11-extensions") + ei_add_cxx_compiler_flag("-Wdouble-promotion") + # ei_add_cxx_compiler_flag("-Wconversion") + ei_add_cxx_compiler_flag("-Wshadow") + ei_add_cxx_compiler_flag("-Wno-psabi") + ei_add_cxx_compiler_flag("-Wno-variadic-macros") + ei_add_cxx_compiler_flag("-Wno-long-long") + ei_add_cxx_compiler_flag("-fno-check-new") + ei_add_cxx_compiler_flag("-fno-common") + ei_add_cxx_compiler_flag("-fstrict-aliasing") + ei_add_cxx_compiler_flag("-wd981") # disable ICC's "operands are evaluated in unspecified order" remark + ei_add_cxx_compiler_flag("-wd2304") # disable ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor + + if(ANDROID_NDK) + ei_add_cxx_compiler_flag("-pie") + ei_add_cxx_compiler_flag("-fPIE") + endif() + + set(CMAKE_REQUIRED_FLAGS "") + + option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF) + if(EIGEN_TEST_SSE2) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse2") + message(STATUS "Enabling SSE2 in tests/examples") + endif() + + option(EIGEN_TEST_SSE3 "Enable/Disable SSE3 in tests/examples" OFF) + if(EIGEN_TEST_SSE3) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse3") + message(STATUS "Enabling SSE3 in tests/examples") + endif() + + option(EIGEN_TEST_SSSE3 "Enable/Disable SSSE3 in tests/examples" OFF) + if(EIGEN_TEST_SSSE3) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mssse3") + message(STATUS "Enabling SSSE3 in tests/examples") + endif() + + option(EIGEN_TEST_SSE4_1 "Enable/Disable SSE4.1 in tests/examples" OFF) + if(EIGEN_TEST_SSE4_1) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1") + message(STATUS "Enabling SSE4.1 in tests/examples") + endif() + + option(EIGEN_TEST_SSE4_2 "Enable/Disable SSE4.2 in tests/examples" OFF) + if(EIGEN_TEST_SSE4_2) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2") + message(STATUS "Enabling SSE4.2 in tests/examples") + endif() + + option(EIGEN_TEST_AVX "Enable/Disable AVX in tests/examples" OFF) + if(EIGEN_TEST_AVX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx") + message(STATUS "Enabling AVX in tests/examples") + endif() + + option(EIGEN_TEST_FMA "Enable/Disable FMA in tests/examples" OFF) + if(EIGEN_TEST_FMA AND NOT EIGEN_TEST_NEON) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma") + message(STATUS "Enabling FMA in tests/examples") + endif() + + option(EIGEN_TEST_AVX2 "Enable/Disable AVX2 in tests/examples" OFF) + if(EIGEN_TEST_AVX2) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
-mavx2 -mfma") + message(STATUS "Enabling AVX2 in tests/examples") + endif() + + option(EIGEN_TEST_AVX512 "Enable/Disable AVX512 in tests/examples" OFF) + if(EIGEN_TEST_AVX512) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mfma") + message(STATUS "Enabling AVX512 in tests/examples") + endif() + + option(EIGEN_TEST_AVX512DQ "Enable/Disable AVX512DQ in tests/examples" OFF) + if(EIGEN_TEST_AVX512DQ) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512dq -mfma") + message(STATUS "Enabling AVX512DQ in tests/examples") + endif() + + option(EIGEN_TEST_AVX512FP16 "Enable/Disable AVX512-FP16 in tests/examples" OFF) + if(EIGEN_TEST_AVX512FP16) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mfma -mavx512vl -mavx512fp16") + message(STATUS "Enabling AVX512-FP16 in tests/examples") + endif() + + option(EIGEN_TEST_F16C "Enable/Disable F16C in tests/examples" OFF) + if(EIGEN_TEST_F16C) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mf16c") + message(STATUS "Enabling F16C in tests/examples") + endif() + + option(EIGEN_TEST_ALTIVEC "Enable/Disable AltiVec in tests/examples" OFF) + if(EIGEN_TEST_ALTIVEC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec -mabi=altivec") + message(STATUS "Enabling AltiVec in tests/examples") + endif() + + option(EIGEN_TEST_VSX "Enable/Disable VSX in tests/examples" OFF) + if(EIGEN_TEST_VSX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64 -mvsx") + message(STATUS "Enabling VSX in tests/examples") + endif() + + option(EIGEN_TEST_MSA "Enable/Disable MSA in tests/examples" OFF) + if(EIGEN_TEST_MSA) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmsa") + message(STATUS "Enabling MSA in tests/examples") + endif() + + option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF) + if(EIGEN_TEST_NEON) + if(EIGEN_TEST_FMA) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon-vfpv4") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon") + endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=hard") + message(STATUS "Enabling NEON in tests/examples") + endif() + + option(EIGEN_TEST_NEON64 "Enable/Disable Neon in tests/examples" OFF) + if(EIGEN_TEST_NEON64) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + message(STATUS "Enabling NEON in tests/examples") + endif() + + option(EIGEN_TEST_Z13 "Enable/Disable S390X(zEC13) ZVECTOR in tests/examples" OFF) + if(EIGEN_TEST_Z13) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z13 -mzvector") + message(STATUS "Enabling S390X(zEC13) ZVECTOR in tests/examples") + endif() + + option(EIGEN_TEST_Z14 "Enable/Disable S390X(zEC14) ZVECTOR in tests/examples" OFF) + if(EIGEN_TEST_Z14) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z14 -mzvector") + message(STATUS "Enabling S390X(zEC13) ZVECTOR in tests/examples") + endif() + + check_cxx_compiler_flag("-fopenmp" COMPILER_SUPPORT_OPENMP) + if(COMPILER_SUPPORT_OPENMP) + option(EIGEN_TEST_OPENMP "Enable/Disable OpenMP in tests/examples" OFF) + if(EIGEN_TEST_OPENMP) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") + message(STATUS "Enabling OpenMP in tests/examples") + endif() + endif() + + else() + # C4127 - conditional expression is constant + # C4714 - marked as __forceinline not inlined (I failed to deactivate it selectively) + # We can disable this warning in the unit tests since it is clear that it occurs + # because we are oftentimes returning objects that have a destructor or may + # throw exceptions - in particular in the unit tests we are throwing extra many + # exceptions to cover indexing errors. 
+ # C4505 - unreferenced local function has been removed (impossible to deactivate selectively) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /wd4127 /wd4505 /wd4714") + + # replace all /Wx by /W4 + string(REGEX REPLACE "/W[0-9]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + + check_cxx_compiler_flag("/openmp" COMPILER_SUPPORT_OPENMP) + if(COMPILER_SUPPORT_OPENMP) + option(EIGEN_TEST_OPENMP "Enable/Disable OpenMP in tests/examples" OFF) + if(EIGEN_TEST_OPENMP) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /openmp") + message(STATUS "Enabling OpenMP in tests/examples") + endif() + endif() + + option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF) + if(EIGEN_TEST_SSE2) + if(NOT CMAKE_CL_64) + # arch is not supported on 64 bit systems, SSE is enabled automatically. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2") + endif() + message(STATUS "Enabling SSE2 in tests/examples") + endif() + + option(EIGEN_TEST_AVX "Enable/Disable AVX in tests/examples" OFF) + if(EIGEN_TEST_AVX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX") + message(STATUS "Enabling AVX in tests/examples") + endif() + + option(EIGEN_TEST_FMA "Enable/Disable FMA/AVX2 in tests/examples" OFF) + option(EIGEN_TEST_AVX2 "Enable/Disable FMA/AVX2 in tests/examples" OFF) + if((EIGEN_TEST_FMA AND NOT EIGEN_TEST_NEON) OR EIGEN_TEST_AVX2) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2") + message(STATUS "Enabling FMA/AVX2 in tests/examples") + endif() + + option(EIGEN_TEST_AVX512 "Enable/Disable AVX512 in tests/examples" OFF) + option(EIGEN_TEST_AVX512DQ "Enable/Disable AVX512DQ in tests/examples" OFF) + if(EIGEN_TEST_AVX512 OR EIGEN_TEST_AVX512DQ) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX512") + message(STATUS "Enabling AVX512 in tests/examples") + endif() + + endif(NOT MSVC) + + option(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION "Disable explicit vectorization in tests/examples" OFF) + option(EIGEN_TEST_X87 "Force using X87 instructions. Implies no vectorization." OFF) + option(EIGEN_TEST_32BIT "Force generating 32bit code." OFF) + + if(EIGEN_TEST_X87) + set(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION ON) + if(CMAKE_COMPILER_IS_GNUCXX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpmath=387") + message(STATUS "Forcing use of x87 instructions in tests/examples") + else() + message(STATUS "EIGEN_TEST_X87 ignored on your compiler") + endif() + endif() + + if(EIGEN_TEST_32BIT) + if(CMAKE_COMPILER_IS_GNUCXX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32") + message(STATUS "Forcing generation of 32-bit code in tests/examples") + else() + message(STATUS "EIGEN_TEST_32BIT ignored on your compiler") + endif() + endif() + + if(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION) + add_definitions(-DEIGEN_DONT_VECTORIZE=1) + message(STATUS "Disabling vectorization in tests/examples") + endif() + + option(EIGEN_TEST_NO_EXPLICIT_ALIGNMENT "Disable explicit alignment (hence vectorization) in tests/examples" OFF) + if(EIGEN_TEST_NO_EXPLICIT_ALIGNMENT) + add_definitions(-DEIGEN_DONT_ALIGN=1) + message(STATUS "Disabling alignment in tests/examples") + endif() + + option(EIGEN_TEST_NO_EXCEPTIONS "Disables C++ exceptions" OFF) + if(EIGEN_TEST_NO_EXCEPTIONS) + ei_add_cxx_compiler_flag("-fno-exceptions") + message(STATUS "Disabling exceptions in tests/examples") + endif() + + set(EIGEN_CUDA_CXX_FLAGS "" CACHE STRING "Additional flags to pass to the cuda compiler.") + set(EIGEN_CUDA_COMPUTE_ARCH 30 CACHE STRING "The CUDA compute architecture(s) to target when compiling CUDA code") + + option(EIGEN_TEST_SYCL "Add Sycl support." 
OFF) + if(EIGEN_TEST_SYCL) + option(EIGEN_SYCL_DPCPP "Use the DPCPP Sycl implementation (DPCPP is default SYCL-Compiler)." ON) + option(EIGEN_SYCL_TRISYCL "Use the triSYCL Sycl implementation." OFF) + option(EIGEN_SYCL_ComputeCpp "Use the ComputeCPP Sycl implementation." OFF) + + # Building options + # https://developer.codeplay.com/products/computecpp/ce/2.11.0/guides/eigen-overview/options-for-building-eigen-sycl + option(EIGEN_SYCL_USE_DEFAULT_SELECTOR "Use sycl default selector to select the preferred device." OFF) + option(EIGEN_SYCL_NO_LOCAL_MEM "Build for devices without dedicated shared memory." OFF) + option(EIGEN_SYCL_LOCAL_MEM "Allow the use of local memory (enabled by default)." ON) + option(EIGEN_SYCL_LOCAL_THREAD_DIM0 "Set work group size for dimension 0." 16) + option(EIGEN_SYCL_LOCAL_THREAD_DIM1 "Set work group size for dimension 1." 16) + option(EIGEN_SYCL_ASYNC_EXECUTION "Allow asynchronous execution (enabled by default)." ON) + option(EIGEN_SYCL_DISABLE_SKINNY "Disable optimization for tall/skinny matrices." OFF) + option(EIGEN_SYCL_DISABLE_DOUBLE_BUFFER "Disable double buffer." OFF) + option(EIGEN_SYCL_DISABLE_SCALAR "Disable scalar contraction." OFF) + option(EIGEN_SYCL_DISABLE_GEMV "Disable GEMV and create a single kernel to calculate contraction instead." OFF) + + set(EIGEN_SYCL ON) + set(CMAKE_CXX_STANDARD 17) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations -Wno-shorten-64-to-32 -Wno-cast-align") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-copy-with-user-provided-copy -Wno-unused-variable") + set (CMAKE_MODULE_PATH "${CMAKE_ROOT}/Modules" "cmake/Modules/" "${CMAKE_MODULE_PATH}") + find_package(Threads REQUIRED) + if(EIGEN_SYCL_TRISYCL) + message(STATUS "Using triSYCL") + include(FindTriSYCL) + elseif(EIGEN_SYCL_ComputeCpp) + message(STATUS "Using ComputeCPP SYCL") + include(FindComputeCpp) + set(COMPUTECPP_DRIVER_DEFAULT_VALUE OFF) + if (NOT MSVC) + set(COMPUTECPP_DRIVER_DEFAULT_VALUE ON) + endif() + option(COMPUTECPP_USE_COMPILER_DRIVER + "Use ComputeCpp driver instead of a 2 steps compilation" + ${COMPUTECPP_DRIVER_DEFAULT_VALUE} + ) + else() #Default SYCL compiler is DPCPP (EIGEN_SYCL_DPCPP) + set(DPCPP_SYCL_TARGET "spir64" CACHE STRING "Default target for Intel CPU/GPU") + message(STATUS "Using DPCPP") + find_package(DPCPP) + add_definitions(-DSYCL_COMPILER_IS_DPCPP) + endif(EIGEN_SYCL_TRISYCL) + if(EIGEN_DONT_VECTORIZE_SYCL) + message(STATUS "Disabling SYCL vectorization in tests/examples") + # When disabling SYCL vectorization, also disable Eigen default vectorization + add_definitions(-DEIGEN_DONT_VECTORIZE=1) + add_definitions(-DEIGEN_DONT_VECTORIZE_SYCL=1) + endif() + endif() + + include(EigenConfigureTesting) + + if(EIGEN_LEAVE_TEST_IN_ALL_TARGET) + # CTest automatic test building relies on the "all" target. + add_subdirectory(test) + add_subdirectory(failtest) + else() + add_subdirectory(test EXCLUDE_FROM_ALL) + add_subdirectory(failtest EXCLUDE_FROM_ALL) + endif() + + ei_testing_print_summary() + + if (EIGEN_SPLIT_TESTSUITE) + ei_split_testsuite("${EIGEN_SPLIT_TESTSUITE}") + endif() +endif(EIGEN_BUILD_TESTING) + +#============================================================================== +# Other Build Configurations. 
+#============================================================================== +add_subdirectory(unsupported) + +if(EIGEN_BUILD_BLAS) + add_subdirectory(blas) +endif() + +if (EIGEN_BUILD_LAPACK) + add_subdirectory(lapack) +endif() + +if(EIGEN_BUILD_DOC) + add_subdirectory(doc EXCLUDE_FROM_ALL) +endif() + +# TODO: consider also replacing EIGEN_BUILD_BTL by a custom target "make btl"? +if(EIGEN_BUILD_BTL) + add_subdirectory(bench/btl EXCLUDE_FROM_ALL) +endif() + +if(NOT WIN32 AND EIGEN_BUILD_SPBENCH) + add_subdirectory(bench/spbench EXCLUDE_FROM_ALL) +endif() + +if (EIGEN_BUILD_DEMOS) + add_subdirectory(demos EXCLUDE_FROM_ALL) +endif() + +if (PROJECT_IS_TOP_LEVEL) + # must be after test and unsupported, for configuring buildtests.in + add_subdirectory(scripts EXCLUDE_FROM_ALL) + configure_file(scripts/cdashtesting.cmake.in cdashtesting.cmake @ONLY) +endif() + +#============================================================================== +# Summary. +#============================================================================== + +if(PROJECT_IS_TOP_LEVEL) + string(TOLOWER "${CMAKE_GENERATOR}" cmake_generator_tolower) + if(cmake_generator_tolower MATCHES "makefile") + message(STATUS "Available targets (use: make TARGET):") + else() + message(STATUS "Available targets (use: cmake --build . --target TARGET):") + endif() + message(STATUS "---------+--------------------------------------------------------------") + message(STATUS "Target | Description") + message(STATUS "---------+--------------------------------------------------------------") + message(STATUS "install | Install Eigen. Headers will be installed to:") + message(STATUS " | /") + message(STATUS " | Using the following values:") + message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}") + message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}") + message(STATUS " | Change the install location of Eigen headers using:") + message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix") + message(STATUS " | Or:") + message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir") + message(STATUS "uninstall| Remove files installed by the install target") + if (EIGEN_BUILD_DOC) + message(STATUS "doc | Generate the API documentation, requires Doxygen & LaTeX") + endif() + if(EIGEN_BUILD_TESTING) + message(STATUS "check | Build and run the unit-tests. Read this page:") + message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests") + endif() + if (EIGEN_BUILD_BLAS) + message(STATUS "blas | Build BLAS library (not the same thing as Eigen)") + endif() + if (EIGEN_BUILD_LAPACK) + message(STATUS "lapack | Build LAPACK subset library (not the same thing as Eigen)") + endif() + message(STATUS "---------+--------------------------------------------------------------") + message(STATUS "") +endif() + +message(STATUS "") +message(STATUS "Configured Eigen ${EIGEN_VERSION_NUMBER}") +message(STATUS "") diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.APACHE b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.APACHE new file mode 100644 index 0000000000000000000000000000000000000000..61e948d2aaa3f7cd1db246b874d3b8ad1e8204fb --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.APACHE @@ -0,0 +1,203 @@ +/* + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ \ No newline at end of file diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.BSD b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.BSD new file mode 100644 index 0000000000000000000000000000000000000000..8964ddfdd0effc65e8dd90ea1d8c411efb30e5fd --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.BSD @@ -0,0 +1,26 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.MINPACK b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.MINPACK new file mode 100644 index 0000000000000000000000000000000000000000..132cc3f33fa7fd4169a92b05241db59c8428a7ab --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.MINPACK @@ -0,0 +1,51 @@ +Minpack Copyright Notice (1999) University of Chicago. All rights reserved + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the +following conditions are met: + +1. Redistributions of source code must retain the above +copyright notice, this list of conditions and the following +disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials +provided with the distribution. + +3. The end-user documentation included with the +redistribution, if any, must include the following +acknowledgment: + + "This product includes software developed by the + University of Chicago, as Operator of Argonne National + Laboratory. + +Alternately, this acknowledgment may appear in the software +itself, if and wherever such third-party acknowledgments +normally appear. + +4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" +WITHOUT WARRANTY OF ANY KIND. 
THE COPYRIGHT HOLDER, THE +UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND +THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE +OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY +OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR +USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF +THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) +DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION +UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL +BE CORRECTED. + +5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT +HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF +ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT, +INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF +ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF +PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER +SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT +(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE, +EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE +POSSIBILITY OF SUCH LOSS OR DAMAGES. diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.MPL2 b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.MPL2 new file mode 100644 index 0000000000000000000000000000000000000000..ee6256cdb62a765749a71aae3abea32884301cd1 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.MPL2 @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.README b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.README
new file mode 100644
index 0000000000000000000000000000000000000000..11af93ca790a55f1a2acdc71bb36acd80874d378
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/COPYING.README
@@ -0,0 +1,6 @@
+Eigen is primarily MPL2 licensed. See COPYING.MPL2 and these links:
+ http://www.mozilla.org/MPL/2.0/
+ http://www.mozilla.org/MPL/2.0/FAQ.html
+
+Some files contain third-party code under BSD or other MPL2-compatible licenses,
+whence the other COPYING.* files here.
\ No newline at end of file
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/CTestConfig.cmake b/thirdparty/DROID-SLAM/thirdparty/eigen/CTestConfig.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..0ea24b8e3447872d8bbb08e574678af7ef4f937f
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/CTestConfig.cmake
@@ -0,0 +1,17 @@
+## This file should be placed in the root directory of your project.
+## Then modify the CMakeLists.txt file in the root directory of your
+## project to incorporate the testing dashboard.
+## # The following are required to use Dart and the Cdash dashboard
+## enable_testing()
+## include(CTest)
+set(CTEST_PROJECT_NAME "Eigen")
+set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
+
+set(CTEST_DROP_METHOD "http")
+set(CTEST_DROP_SITE "my.cdash.org")
+set(CTEST_DROP_LOCATION "/submit.php?project=Eigen")
+set(CTEST_DROP_SITE_CDASH TRUE)
+#set(CTEST_PROJECT_SUBPROJECTS
+#Official
+#Unsupported
+#)
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/CTestCustom.cmake.in b/thirdparty/DROID-SLAM/thirdparty/eigen/CTestCustom.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..89e487f05a0d19e336302e09747f6b94df43db03
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/CTestCustom.cmake.in
@@ -0,0 +1,4 @@
+
+set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "2000")
+set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "2000")
+list(APPEND CTEST_CUSTOM_ERROR_EXCEPTION @EIGEN_CTEST_ERROR_EXCEPTION@)
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/AccelerateSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/AccelerateSupport
new file mode 100644
index 0000000000000000000000000000000000000000..533be688ed264cccc1041da0077ecd3768863169
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/AccelerateSupport
@@ -0,0 +1,52 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ACCELERATESUPPORT_MODULE_H
+#define EIGEN_ACCELERATESUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \ingroup Support_modules
+ * \defgroup AccelerateSupport_Module AccelerateSupport module
+ *
+ * This module provides an interface to the Apple Accelerate library.
+ * It provides the following seven main factorization classes:
+ * - class AccelerateLLT: a Cholesky (LL^T) factorization.
+ * - class AccelerateLDLT: the default LDL^T factorization.
+ * - class AccelerateLDLTUnpivoted: a Cholesky-like LDL^T factorization with only 1x1 pivots and no pivoting
+ * - class AccelerateLDLTSBK: an LDL^T factorization with Supernode Bunch-Kaufman and static pivoting
+ * - class AccelerateLDLTTPP: an LDL^T factorization with full threshold partial pivoting
+ * - class AccelerateQR: a QR factorization
+ * - class AccelerateCholeskyAtA: a QR factorization without storing Q (equivalent to A^TA = R^T R)
+ *
+ * \code
+ * #include <Eigen/AccelerateSupport>
+ * \endcode
+ *
+ * In order to use this module, the Accelerate headers must be accessible from
+ * the include paths, and your binary must be linked to the Accelerate framework.
+ * The Accelerate library is only available on Apple hardware.
+ *
+ * Note that many of the algorithms can be influenced by the UpLo template
+ * argument. All matrices are assumed to be symmetric. For example, the following
+ * creates an LDLT factorization where your matrix is symmetric (implicit) and
+ * uses the lower triangle:
+ *
+ * \code
+ * AccelerateLDLT<SparseMatrix<double>, Lower> ldlt;
+ * \endcode
+ */
+
+// IWYU pragma: begin_exports
+#include "src/AccelerateSupport/AccelerateSupport.h"
+// IWYU pragma: end_exports
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_ACCELERATESUPPORT_MODULE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Cholesky b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Cholesky
new file mode 100644
index 0000000000000000000000000000000000000000..b05ed8278c67883007d2957d08ea5222ffd55170
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Cholesky
@@ -0,0 +1,43 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CHOLESKY_MODULE_H
+#define EIGEN_CHOLESKY_MODULE_H
+
+#include "Core"
+#include "Jacobi"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup Cholesky_Module Cholesky module
+ *
+ *
+ *
+ * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
+ * Those decompositions are also accessible via the following methods:
+ * - MatrixBase::llt()
+ * - MatrixBase::ldlt()
+ * - SelfAdjointView::llt()
+ * - SelfAdjointView::ldlt()
+ *
+ * \code
+ * #include <Eigen/Cholesky>
+ * \endcode
+ */
+
+// IWYU pragma: begin_exports
+#include "src/Cholesky/LLT.h"
+#include "src/Cholesky/LDLT.h"
+#ifdef EIGEN_USE_LAPACKE
+#include "src/misc/lapacke_helpers.h"
+#include "src/Cholesky/LLT_LAPACKE.h"
+#endif
+// IWYU pragma: end_exports
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CHOLESKY_MODULE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/CholmodSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/CholmodSupport
new file mode 100644
index 0000000000000000000000000000000000000000..adc5f8d63e755b4f21531fecc6d471f8596fffb5
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/CholmodSupport
@@ -0,0 +1,48 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
+#define EIGEN_CHOLMODSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include <cholmod.h>
+
+/** \ingroup Support_modules
+ * \defgroup CholmodSupport_Module CholmodSupport module
+ *
+ * This module provides an interface to the Cholmod library which is part of the suitesparse package. It provides the following two main factorization classes:
+ * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
+ * - class CholmodDecomposition: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of
+ * the underlying factorization method (supernodal or simplicial).
+ *
+ * For the sake of completeness, this module also provides the following two classes:
+ * - class CholmodSimplicialLLT
+ * - class CholmodSimplicialLDLT
+ * Note that these classes do not bring any particular advantage compared to the built-in
+ * SimplicialLLT and SimplicialLDLT factorization classes.
+ *
+ * \code
+ * #include <Eigen/CholmodSupport>
+ * \endcode
+ *
+ * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be
+ * linked to the cholmod library and its dependencies. The dependencies depend on how cholmod has been compiled. For a
+ * cmake based project, you can use our FindCholmod.cmake module to help you in this task.
+ *
+ */
+
+// IWYU pragma: begin_exports
+#include "src/CholmodSupport/CholmodSupport.h"
+// IWYU pragma: end_exports
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CHOLMODSUPPORT_MODULE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Core b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Core
new file mode 100644
index 0000000000000000000000000000000000000000..f87f3d2f63026789a72da9b49133901857171959
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Core
@@ -0,0 +1,419 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
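// A minimal usage sketch for the Cholesky module documented above. This is an
// illustration only, assuming nothing beyond the Eigen headers being on the
// include path; MatrixBase::ldlt() and MatrixBase::llt() are the entry points
// named in the module documentation, and the values are arbitrary example data.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d A;
  A << 4.0, 1.0,
       1.0, 3.0;  // symmetric positive definite, so Cholesky applies
  Eigen::Vector2d b(1.0, 2.0);
  Eigen::Vector2d x = A.ldlt().solve(b);  // robust LDL^T solve of A x = b
  std::cout << x.transpose() << "\n";     // prints roughly 0.0909 0.6364
  return 0;
}
// The CholmodSupport classes above expose the same compute()/solve() pattern
// for large sparse matrices, delegating the factorization to the Cholmod library.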
+
+#ifndef EIGEN_CORE_MODULE_H
+#define EIGEN_CORE_MODULE_H
+
+// first thing Eigen does: stop the compiler from reporting useless warnings.
+#include "src/Core/util/DisableStupidWarnings.h"
+
+// then include this file where all our macros are defined. It's really important to do it first because
+// it's where we do all the compiler/OS/arch detections and define most defaults.
+#include "src/Core/util/Macros.h"
+
+// This detects SSE/AVX/NEON/etc. and configure alignment settings
+#include "src/Core/util/ConfigureVectorization.h"
+
+// We need cuda_runtime.h/hip_runtime.h to ensure that
+// the EIGEN_USING_STD macro works properly on the device side
+#if defined(EIGEN_CUDACC)
+#include <cuda_runtime.h>
+#elif defined(EIGEN_HIPCC)
+#include <hip/hip_runtime.h>
+#endif
+
+#ifdef EIGEN_EXCEPTIONS
+#include <new>
+#endif
+
+// Disable the ipa-cp-clone optimization flag with MinGW 6.x or older (enabled by default with -O3)
+// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
+#if EIGEN_COMP_MINGW && EIGEN_GNUC_STRICT_LESS_THAN(6, 0, 0)
+#pragma GCC optimize("-fno-ipa-cp-clone")
+#endif
+
+// Prevent ICC from specializing std::complex operators that silently fail
+// on device. This allows us to use our own device-compatible specializations
+// instead.
+#if EIGEN_COMP_ICC && defined(EIGEN_GPU_COMPILE_PHASE) && !defined(_OVERRIDE_COMPLEX_SPECIALIZATION_)
+#define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1
+#endif
+#include <complex>
+
+// this include file manages BLAS and MKL related macros
+// and inclusion of their respective header files
+#include "src/Core/util/MKL_support.h"
+
+#if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
+#define EIGEN_HAS_GPU_FP16
+#endif
+
+#if defined(EIGEN_HAS_CUDA_BF16) || defined(EIGEN_HAS_HIP_BF16)
+#define EIGEN_HAS_GPU_BF16
+#endif
+
+#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
+#define EIGEN_HAS_OPENMP
+#endif
+
+#ifdef EIGEN_HAS_OPENMP
+#include <atomic>
+#include <omp.h>
+#endif
+
+// MSVC for windows mobile does not have the errno.h file
+#if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM
+#define EIGEN_HAS_ERRNO
+#endif
+
+#ifdef EIGEN_HAS_ERRNO
+#include <cerrno>
+#endif
+#include <cstddef>
+#include <cstdlib>
+#include <cmath>
+#include <cassert>
+#ifndef EIGEN_NO_IO
+#include <sstream>
+#include <iosfwd>
+#endif
+#include <cstring>
+#include <string>
+#include <limits>
+#include <climits> // for CHAR_BIT
+// for min/max:
+#include <algorithm>
+
+#include <array>
+#include <cstdint>
+
+// for std::is_nothrow_move_assignable
+#include <type_traits>
+
+// for std::this_thread::yield().
+#if !defined(EIGEN_USE_BLAS) && (defined(EIGEN_HAS_OPENMP) || defined(EIGEN_GEMM_THREADPOOL))
+#include <thread>
+#endif
+
+// for outputting debug info
+#ifdef EIGEN_DEBUG_ASSIGN
+#include <iostream>
+#endif
+
+// required for __cpuid, needs to be included after cmath
+// also required for _BitScanReverse on Windows on ARM
+#if EIGEN_COMP_MSVC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM64) && !EIGEN_OS_WINCE
+#include <intrin.h>
+#endif
+
+#if defined(EIGEN_USE_SYCL)
+#undef min
+#undef max
+#undef isnan
+#undef isinf
+#undef isfinite
+#include <sycl/sycl.hpp>
+#include <map>
+#include <memory>
+#include <utility>
+#include <thread>
+#ifndef EIGEN_SYCL_LOCAL_THREAD_DIM0
+#define EIGEN_SYCL_LOCAL_THREAD_DIM0 16
+#endif
+#ifndef EIGEN_SYCL_LOCAL_THREAD_DIM1
+#define EIGEN_SYCL_LOCAL_THREAD_DIM1 16
+#endif
+#endif
+
+#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || \
+    defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || \
+    defined EIGEN2_SUPPORT
+// This will generate an error message:
+#error Eigen2-support is only available up to version 3.2. Please go to "http://eigen.tuxfamily.org/index.php?title=Eigen2" for further information
+#endif
+
+namespace Eigen {
+
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
+// ensure QNX/QCC support
+using std::size_t;
+// gcc 4.6.0 wants std:: for ptrdiff_t
+using std::ptrdiff_t;
+
+} // namespace Eigen
+
+/** \defgroup Core_Module Core module
+ * This is the main module of Eigen providing dense matrix and vector support
+ * (both fixed and dynamic size) with all the features corresponding to a BLAS library
+ * and much more...
+ *
+ * \code
+ * #include <Eigen/Core>
+ * \endcode
+ */
+
+#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
+#include "src/misc/lapacke.h"
+#endif
+#endif
+
+// IWYU pragma: begin_exports
+#include "src/Core/util/Constants.h"
+#include "src/Core/util/Meta.h"
+#include "src/Core/util/Assert.h"
+#include "src/Core/util/ForwardDeclarations.h"
+#include "src/Core/util/StaticAssert.h"
+#include "src/Core/util/XprHelper.h"
+#include "src/Core/util/Memory.h"
+#include "src/Core/util/IntegralConstant.h"
+#include "src/Core/util/Serializer.h"
+#include "src/Core/util/SymbolicIndex.h"
+#include "src/Core/util/EmulateArray.h"
+#include "src/Core/util/MoreMeta.h"
+
+#include "src/Core/NumTraits.h"
+#include "src/Core/MathFunctions.h"
+#include "src/Core/RandomImpl.h"
+#include "src/Core/GenericPacketMath.h"
+#include "src/Core/MathFunctionsImpl.h"
+#include "src/Core/arch/Default/ConjHelper.h"
+// Generic half float support
+#include "src/Core/arch/Default/Half.h"
+#include "src/Core/arch/Default/BFloat16.h"
+#include "src/Core/arch/Default/GenericPacketMathFunctionsFwd.h"
+
+#if defined EIGEN_VECTORIZE_AVX512
+#if defined EIGEN_VECTORIZE_AVX512FP16
+#include "src/Core/arch/AVX512/PacketMathFP16.h"
+#endif
+#include "src/Core/arch/SSE/PacketMath.h"
+#include "src/Core/arch/SSE/TypeCasting.h"
+#include "src/Core/arch/SSE/Complex.h"
+#include "src/Core/arch/AVX/PacketMath.h"
+#include "src/Core/arch/AVX/TypeCasting.h"
+#include "src/Core/arch/AVX/Complex.h"
+#include "src/Core/arch/AVX512/PacketMath.h"
+#include "src/Core/arch/AVX512/TypeCasting.h"
+#include "src/Core/arch/AVX512/Complex.h"
+#include "src/Core/arch/SSE/MathFunctions.h"
+#include "src/Core/arch/AVX/MathFunctions.h"
+#include "src/Core/arch/AVX512/MathFunctions.h"
+#include "src/Core/arch/AVX512/TrsmKernel.h"
+#elif defined EIGEN_VECTORIZE_AVX
+ // Use AVX for floats and doubles, SSE for integers
+#include "src/Core/arch/SSE/PacketMath.h"
+#include "src/Core/arch/SSE/TypeCasting.h"
+#include "src/Core/arch/SSE/Complex.h"
+#include "src/Core/arch/AVX/PacketMath.h"
+#include "src/Core/arch/AVX/TypeCasting.h"
+#include "src/Core/arch/AVX/Complex.h"
+#include "src/Core/arch/SSE/MathFunctions.h"
+#include "src/Core/arch/AVX/MathFunctions.h"
+#elif defined EIGEN_VECTORIZE_SSE
+#include "src/Core/arch/SSE/PacketMath.h"
+#include "src/Core/arch/SSE/TypeCasting.h"
+#include "src/Core/arch/SSE/MathFunctions.h"
+#include "src/Core/arch/SSE/Complex.h"
+#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
+#include "src/Core/arch/AltiVec/PacketMath.h"
+#include "src/Core/arch/AltiVec/TypeCasting.h"
+#include "src/Core/arch/AltiVec/MathFunctions.h"
+#include "src/Core/arch/AltiVec/Complex.h"
+#elif defined EIGEN_VECTORIZE_NEON
+#include "src/Core/arch/NEON/PacketMath.h"
+#include "src/Core/arch/NEON/TypeCasting.h"
+#include "src/Core/arch/NEON/MathFunctions.h"
+#include "src/Core/arch/NEON/Complex.h"
+#elif defined EIGEN_VECTORIZE_SVE
+#include "src/Core/arch/SVE/PacketMath.h"
+#include "src/Core/arch/SVE/TypeCasting.h"
+#include "src/Core/arch/SVE/MathFunctions.h"
+#elif defined EIGEN_VECTORIZE_ZVECTOR
+#include "src/Core/arch/ZVector/PacketMath.h"
+#include "src/Core/arch/ZVector/MathFunctions.h"
+#include "src/Core/arch/ZVector/Complex.h"
+#elif defined EIGEN_VECTORIZE_MSA
+#include "src/Core/arch/MSA/PacketMath.h"
+#include "src/Core/arch/MSA/MathFunctions.h"
+#include "src/Core/arch/MSA/Complex.h"
+#elif defined EIGEN_VECTORIZE_HVX
+#include "src/Core/arch/HVX/PacketMath.h"
+#endif
+
+#if defined EIGEN_VECTORIZE_GPU
+#include "src/Core/arch/GPU/PacketMath.h"
+#include "src/Core/arch/GPU/MathFunctions.h"
+#include "src/Core/arch/GPU/TypeCasting.h"
+#endif
+
+#if defined(EIGEN_USE_SYCL)
+#include "src/Core/arch/SYCL/InteropHeaders.h"
+#if !defined(EIGEN_DONT_VECTORIZE_SYCL)
+#include "src/Core/arch/SYCL/PacketMath.h"
+#include "src/Core/arch/SYCL/MathFunctions.h"
+#include "src/Core/arch/SYCL/TypeCasting.h"
+#endif
+#endif
+
+#include "src/Core/arch/Default/Settings.h"
+// This file provides generic implementations valid for scalar as well
+#include "src/Core/arch/Default/GenericPacketMathFunctions.h"
+
+#include "src/Core/functors/TernaryFunctors.h"
+#include "src/Core/functors/BinaryFunctors.h"
+#include "src/Core/functors/UnaryFunctors.h"
+#include "src/Core/functors/NullaryFunctors.h"
+#include "src/Core/functors/StlFunctors.h"
+#include "src/Core/functors/AssignmentFunctors.h"
+
+// Specialized functors for GPU.
+#ifdef EIGEN_GPUCC
+#include "src/Core/arch/GPU/Complex.h"
+#endif
+
+// Specializations of vectorized activation functions for NEON.
+#ifdef EIGEN_VECTORIZE_NEON
+#include "src/Core/arch/NEON/UnaryFunctors.h"
+#endif
+
+#include "src/Core/util/IndexedViewHelper.h"
+#include "src/Core/util/ReshapedHelper.h"
+#include "src/Core/ArithmeticSequence.h"
+#ifndef EIGEN_NO_IO
+#include "src/Core/IO.h"
+#endif
+#include "src/Core/DenseCoeffsBase.h"
+#include "src/Core/DenseBase.h"
+#include "src/Core/MatrixBase.h"
+#include "src/Core/EigenBase.h"
+
+#include "src/Core/Product.h"
+#include "src/Core/CoreEvaluators.h"
+#include "src/Core/AssignEvaluator.h"
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
+ // at least confirmed with Doxygen 1.5.5 and 1.5.6
+#include "src/Core/Assign.h"
+#endif
+
+#include "src/Core/ArrayBase.h"
+#include "src/Core/util/BlasUtil.h"
+#include "src/Core/DenseStorage.h"
+#include "src/Core/NestByValue.h"
+
+// #include "src/Core/ForceAlignedAccess.h"
+
+#include "src/Core/ReturnByValue.h"
+#include "src/Core/NoAlias.h"
+#include "src/Core/PlainObjectBase.h"
+#include "src/Core/Matrix.h"
+#include "src/Core/Array.h"
+#include "src/Core/Fill.h"
+#include "src/Core/CwiseTernaryOp.h"
+#include "src/Core/CwiseBinaryOp.h"
+#include "src/Core/CwiseUnaryOp.h"
+#include "src/Core/CwiseNullaryOp.h"
+#include "src/Core/CwiseUnaryView.h"
+#include "src/Core/SelfCwiseBinaryOp.h"
+#include "src/Core/InnerProduct.h"
+#include "src/Core/Dot.h"
+#include "src/Core/StableNorm.h"
+#include "src/Core/Stride.h"
+#include "src/Core/MapBase.h"
+#include "src/Core/Map.h"
+#include "src/Core/Ref.h"
+#include "src/Core/Block.h"
+#include "src/Core/VectorBlock.h"
+#include "src/Core/IndexedView.h"
+#include "src/Core/Reshaped.h"
+#include "src/Core/Transpose.h"
+#include "src/Core/DiagonalMatrix.h"
+#include "src/Core/Diagonal.h"
+#include "src/Core/DiagonalProduct.h"
+#include "src/Core/SkewSymmetricMatrix3.h"
+#include "src/Core/Redux.h"
+#include "src/Core/Visitor.h"
+#include "src/Core/Fuzzy.h" +#include "src/Core/Swap.h" +#include "src/Core/CommaInitializer.h" +#include "src/Core/GeneralProduct.h" +#include "src/Core/Solve.h" +#include "src/Core/Inverse.h" +#include "src/Core/SolverBase.h" +#include "src/Core/PermutationMatrix.h" +#include "src/Core/Transpositions.h" +#include "src/Core/TriangularMatrix.h" +#include "src/Core/SelfAdjointView.h" +#include "src/Core/products/GeneralBlockPanelKernel.h" +#include "src/Core/DeviceWrapper.h" +#ifdef EIGEN_GEMM_THREADPOOL +#include "ThreadPool" +#endif +#include "src/Core/products/Parallelizer.h" +#include "src/Core/ProductEvaluators.h" +#include "src/Core/products/GeneralMatrixVector.h" +#include "src/Core/products/GeneralMatrixMatrix.h" +#include "src/Core/SolveTriangular.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular.h" +#include "src/Core/products/SelfadjointMatrixVector.h" +#include "src/Core/products/SelfadjointMatrixMatrix.h" +#include "src/Core/products/SelfadjointProduct.h" +#include "src/Core/products/SelfadjointRank2Update.h" +#include "src/Core/products/TriangularMatrixVector.h" +#include "src/Core/products/TriangularMatrixMatrix.h" +#include "src/Core/products/TriangularSolverMatrix.h" +#include "src/Core/products/TriangularSolverVector.h" +#include "src/Core/BandMatrix.h" +#include "src/Core/CoreIterators.h" +#include "src/Core/ConditionEstimator.h" + +#if defined(EIGEN_VECTORIZE_VSX) +#include "src/Core/arch/AltiVec/MatrixProduct.h" +#elif defined EIGEN_VECTORIZE_NEON +#include "src/Core/arch/NEON/GeneralBlockPanelKernel.h" +#endif + +#if defined(EIGEN_VECTORIZE_AVX512) +#include "src/Core/arch/AVX512/GemmKernel.h" +#endif + +#include "src/Core/Select.h" +#include "src/Core/VectorwiseOp.h" +#include "src/Core/PartialReduxEvaluator.h" +#include "src/Core/Random.h" +#include "src/Core/Replicate.h" +#include "src/Core/Reverse.h" +#include "src/Core/ArrayWrapper.h" +#include "src/Core/StlIterators.h" + +#ifdef EIGEN_USE_BLAS +#include "src/Core/products/GeneralMatrixMatrix_BLAS.h" +#include "src/Core/products/GeneralMatrixVector_BLAS.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h" +#include "src/Core/products/SelfadjointMatrixMatrix_BLAS.h" +#include "src/Core/products/SelfadjointMatrixVector_BLAS.h" +#include "src/Core/products/TriangularMatrixMatrix_BLAS.h" +#include "src/Core/products/TriangularMatrixVector_BLAS.h" +#include "src/Core/products/TriangularSolverMatrix_BLAS.h" +#endif // EIGEN_USE_BLAS + +#ifdef EIGEN_USE_MKL_VML +#include "src/Core/Assign_MKL.h" +#endif + +#include "src/Core/GlobalFunctions.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CORE_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Dense b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Dense new file mode 100644 index 0000000000000000000000000000000000000000..5768910bd88c43f0761f2f345c6f0e3b46a4d8ec --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Dense @@ -0,0 +1,7 @@ +#include "Core" +#include "LU" +#include "Cholesky" +#include "QR" +#include "SVD" +#include "Geometry" +#include "Eigenvalues" diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Eigen b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Eigen new file mode 100644 index 0000000000000000000000000000000000000000..654c8dc6380f7bb21d3ba1a9ce916006043552aa --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Eigen @@ -0,0 +1,2 @@ +#include "Dense" +#include "Sparse" diff --git 
a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Eigenvalues b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Eigenvalues
new file mode 100644
index 0000000000000000000000000000000000000000..3b0bdee171517756b97eb773f89b13656c49e5f8
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Eigenvalues
@@ -0,0 +1,63 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EIGENVALUES_MODULE_H
+#define EIGEN_EIGENVALUES_MODULE_H
+
+#include "Core"
+
+#include "Cholesky"
+#include "Jacobi"
+#include "Householder"
+#include "LU"
+#include "Geometry"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup Eigenvalues_Module Eigenvalues module
+ *
+ *
+ *
+ * This module mainly provides various eigenvalue solvers.
+ * This module also provides some MatrixBase methods, including:
+ * - MatrixBase::eigenvalues(),
+ * - MatrixBase::operatorNorm()
+ *
+ * \code
+ * #include <Eigen/Eigenvalues>
+ * \endcode
+ */
+
+#include "src/misc/RealSvd2x2.h"
+
+// IWYU pragma: begin_exports
+#include "src/Eigenvalues/Tridiagonalization.h"
+#include "src/Eigenvalues/RealSchur.h"
+#include "src/Eigenvalues/EigenSolver.h"
+#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
+#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
+#include "src/Eigenvalues/HessenbergDecomposition.h"
+#include "src/Eigenvalues/ComplexSchur.h"
+#include "src/Eigenvalues/ComplexEigenSolver.h"
+#include "src/Eigenvalues/RealQZ.h"
+#include "src/Eigenvalues/GeneralizedEigenSolver.h"
+#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
+#ifdef EIGEN_USE_LAPACKE
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
+#include "src/misc/lapacke.h"
+#endif
+#include "src/Eigenvalues/RealSchur_LAPACKE.h"
+#include "src/Eigenvalues/ComplexSchur_LAPACKE.h"
+#include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h"
+#endif
+// IWYU pragma: end_exports
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_EIGENVALUES_MODULE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Geometry b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Geometry
new file mode 100644
index 0000000000000000000000000000000000000000..019c98b6ef0b1d265c42ef7a1d884f078ffc3c95
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Geometry
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
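// A minimal sketch of the Eigenvalues module documented above (illustrative
// only; it assumes just the Eigen headers on the include path, and the matrix
// is arbitrary example data). SelfAdjointEigenSolver and
// MatrixBase::operatorNorm() are the APIs named in the module documentation.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d A;
  A << 2.0, 1.0,
       1.0, 2.0;  // symmetric, so the selfadjoint solver applies
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
  std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";  // 1 3
  std::cout << "operator norm: " << A.operatorNorm() << "\n";            // 3
  return 0;
}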
+ +#ifndef EIGEN_GEOMETRY_MODULE_H +#define EIGEN_GEOMETRY_MODULE_H + +#include "Core" + +#include "SVD" +#include "LU" +#include + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Geometry_Module Geometry module + * + * This module provides support for: + * - fixed-size homogeneous transformations + * - translation, scaling, 2D and 3D rotations + * - \link Quaternion quaternions \endlink + * - cross products (\ref MatrixBase::cross, \ref MatrixBase::cross3) + * - orthogonal vector generation (\ref MatrixBase::unitOrthogonal) + * - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes + * \endlink + * - \link AlignedBox axis aligned bounding boxes \endlink + * - \link umeyama least-square transformation fitting \endlink + * + * \code + * #include + * \endcode + */ + +// IWYU pragma: begin_exports +#include "src/Geometry/OrthoMethods.h" +#include "src/Geometry/EulerAngles.h" +#include "src/Geometry/Homogeneous.h" +#include "src/Geometry/RotationBase.h" +#include "src/Geometry/Rotation2D.h" +#include "src/Geometry/Quaternion.h" +#include "src/Geometry/AngleAxis.h" +#include "src/Geometry/Transform.h" +#include "src/Geometry/Translation.h" +#include "src/Geometry/Scaling.h" +#include "src/Geometry/Hyperplane.h" +#include "src/Geometry/ParametrizedLine.h" +#include "src/Geometry/AlignedBox.h" +#include "src/Geometry/Umeyama.h" + +// Use the SSE optimized version whenever possible. +#if (defined EIGEN_VECTORIZE_SSE) || (defined EIGEN_VECTORIZE_NEON) +#include "src/Geometry/arch/Geometry_SIMD.h" +#endif +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_GEOMETRY_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Householder b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Householder new file mode 100644 index 0000000000000000000000000000000000000000..5070e070e677727c50acb2b3843051820105e8cb --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Householder @@ -0,0 +1,31 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_HOUSEHOLDER_MODULE_H +#define EIGEN_HOUSEHOLDER_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Householder_Module Householder module + * This module provides Householder transformations. + * + * \code + * #include + * \endcode + */ + +// IWYU pragma: begin_exports +#include "src/Householder/Householder.h" +#include "src/Householder/HouseholderSequence.h" +#include "src/Householder/BlockHouseholder.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_HOUSEHOLDER_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/IterativeLinearSolvers b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/IterativeLinearSolvers new file mode 100644 index 0000000000000000000000000000000000000000..fe5159e9f881eccf30f5d1092897912879dd209a --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/IterativeLinearSolvers @@ -0,0 +1,52 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H +#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** + * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module + * + * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a + squared matrix, usually very large and sparse. + * Those solvers are accessible via the following classes: + * - ConjugateGradient for selfadjoint (hermitian) matrices, + * - LeastSquaresConjugateGradient for rectangular least-square problems, + * - BiCGSTAB for general square matrices. + * + * These iterative solvers are associated with some preconditioners: + * - IdentityPreconditioner - not really useful + * - DiagonalPreconditioner - also called Jacobi preconditioner, work very well on diagonal dominant matrices. + * - IncompleteLUT - incomplete LU factorization with dual thresholding + * + * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, + UmfPackSupport, SuperLUSupport, AccelerateSupport. + * + \code + #include + \endcode + */ + +// IWYU pragma: begin_exports +#include "src/IterativeLinearSolvers/SolveWithGuess.h" +#include "src/IterativeLinearSolvers/IterativeSolverBase.h" +#include "src/IterativeLinearSolvers/BasicPreconditioners.h" +#include "src/IterativeLinearSolvers/ConjugateGradient.h" +#include "src/IterativeLinearSolvers/LeastSquareConjugateGradient.h" +#include "src/IterativeLinearSolvers/BiCGSTAB.h" +#include "src/IterativeLinearSolvers/IncompleteLUT.h" +#include "src/IterativeLinearSolvers/IncompleteCholesky.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Jacobi b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Jacobi new file mode 100644 index 0000000000000000000000000000000000000000..31eb36a79f696008287593df4e4ab8876d9536da --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Jacobi @@ -0,0 +1,33 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_JACOBI_MODULE_H +#define EIGEN_JACOBI_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Jacobi_Module Jacobi module + * This module provides Jacobi and Givens rotations. + * + * \code + * #include + * \endcode + * + * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation: + * - MatrixBase::applyOnTheLeft() + * - MatrixBase::applyOnTheRight(). 
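For the applyOnTheLeft()/applyOnTheRight() methods just listed, a Givens-rotation sketch adapted from the documented pattern (illustrative only, not part of this patch):

// Illustrative sketch: zeroing a vector entry with a Givens rotation.
#include <Eigen/Dense>
#include <Eigen/Jacobi>
#include <iostream>

int main() {
  Eigen::Vector2d v(3.0, 4.0);

  Eigen::JacobiRotation<double> G;
  G.makeGivens(v.x(), v.y());           // choose c, s so the rotation maps (3,4) to (r,0)

  v.applyOnTheLeft(0, 1, G.adjoint());  // rotate entries 0 and 1
  std::cout << v.transpose() << "\n";   // approximately (5, 0)
  return 0;
}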
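The IterativeLinearSolvers classes listed earlier (ConjugateGradient, LeastSquaresConjugateGradient, BiCGSTAB) share one workflow: construct, compute(), solve(). A self-contained sketch on a made-up SPD 1-D Poisson system (illustrative only, not part of this patch):

// Illustrative sketch: solving A x = b with the IterativeLinearSolvers module.
#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main() {
  using SpMat = Eigen::SparseMatrix<double>;
  const int n = 100;

  // Assemble a symmetric positive definite tridiagonal system (1-D Poisson stencil).
  std::vector<Eigen::Triplet<double>> coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.emplace_back(i, i, 2.0);
    if (i > 0) coeffs.emplace_back(i, i - 1, -1.0);
    if (i + 1 < n) coeffs.emplace_back(i, i + 1, -1.0);
  }
  SpMat A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // ConjugateGradient for the selfadjoint case; DiagonalPreconditioner is the default.
  Eigen::ConjugateGradient<SpMat, Eigen::Lower | Eigen::Upper> cg(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "iterations: " << cg.iterations()
            << ", estimated error: " << cg.error() << "\n";

  // For a general square matrix, BiCGSTAB is the drop-in replacement:
  //   Eigen::BiCGSTAB<SpMat> solver(A); x = solver.solve(b);
  return 0;
}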
+ */ + +// IWYU pragma: begin_exports +#include "src/Jacobi/Jacobi.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_JACOBI_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/KLUSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/KLUSupport new file mode 100644 index 0000000000000000000000000000000000000000..13959a3c863692af4d129d353d10fd35a39a1e1c --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/KLUSupport @@ -0,0 +1,43 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_KLUSUPPORT_MODULE_H +#define EIGEN_KLUSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +#include +} + +/** \ingroup Support_modules + * \defgroup KLUSupport_Module KLUSupport module + * + * This module provides an interface to the KLU library which is part of the suitesparse package. It provides the following factorization class: + * - class KLU: a sparse LU factorization, well-suited for circuit simulation. + * + * \code + * #include + * \endcode + * + * In order to use this module, the klu and btf headers must be accessible from the include paths, and your binary must + * be linked to the klu library and its dependencies. The dependencies depend on how umfpack has been compiled. For a + * cmake based project, you can use our FindKLU.cmake module to help you in this task. + * + */ + +// IWYU pragma: begin_exports +#include "src/KLUSupport/KLUSupport.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_KLUSUPPORT_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/LU b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/LU new file mode 100644 index 0000000000000000000000000000000000000000..d80448039ef3cf393fa02b6b008b54eb787af968 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/LU @@ -0,0 +1,46 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LU_MODULE_H +#define EIGEN_LU_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup LU_Module LU module + * This module includes %LU decomposition and related notions such as matrix inversion and determinant. 
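A quick sketch of this LU module in use; the inverse() and determinant() convenience methods it adds are listed just below (illustrative only, not part of this patch):

// Illustrative sketch: the LU module's MatrixBase entry points and FullPivLU.
#include <Eigen/Dense>
#include <Eigen/LU>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;  // invertible; det(A) = -3

  std::cout << "det(A) = " << A.determinant() << "\n";
  std::cout << "inv(A):\n" << A.inverse() << "\n";

  // FullPivLU additionally reveals rank, kernel and image.
  Eigen::FullPivLU<Eigen::Matrix3d> lu(A);
  std::cout << "rank(A) = " << lu.rank() << "\n";
  std::cout << "solution of A x = (1,1,1):\n"
            << lu.solve(Eigen::Vector3d::Ones()) << "\n";
  return 0;
}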
+ * This module defines the following MatrixBase methods: + * - MatrixBase::inverse() + * - MatrixBase::determinant() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Kernel.h" +#include "src/misc/Image.h" + +// IWYU pragma: begin_exports +#include "src/LU/FullPivLU.h" +#include "src/LU/PartialPivLU.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/misc/lapacke_helpers.h" +#include "src/LU/PartialPivLU_LAPACKE.h" +#endif +#include "src/LU/Determinant.h" +#include "src/LU/InverseImpl.h" + +#if defined EIGEN_VECTORIZE_SSE || defined EIGEN_VECTORIZE_NEON +#include "src/LU/arch/InverseSize4.h" +#endif +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_LU_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/MetisSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/MetisSupport new file mode 100644 index 0000000000000000000000000000000000000000..3636d3a06b94518d2a18ea8866274c163896bcdd --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/MetisSupport @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_METISSUPPORT_MODULE_H +#define EIGEN_METISSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +} + +/** \ingroup Support_modules + * \defgroup MetisSupport_Module MetisSupport module + * + * \code + * #include + * \endcode + * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis). + * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. \endlink + */ + +// IWYU pragma: begin_exports +#include "src/MetisSupport/MetisSupport.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_METISSUPPORT_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/OrderingMethods b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/OrderingMethods new file mode 100644 index 0000000000000000000000000000000000000000..016741945f0e90065795029b761b2a25b0997be5 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/OrderingMethods @@ -0,0 +1,73 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ORDERINGMETHODS_MODULE_H +#define EIGEN_ORDERINGMETHODS_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** + * \defgroup OrderingMethods_Module OrderingMethods module + * + * This module is currently for internal use only + * + * It defines various built-in and external ordering methods for sparse matrices. + * They are typically used to reduce the number of elements during + * the sparse matrix decomposition (LLT, LU, QR). + * Precisely, in a preprocessing step, a permutation matrix P is computed using + * those ordering methods and applied to the columns of the matrix. + * Using for instance the sparse Cholesky decomposition, it is expected that + * the nonzeros elements in LLT(A*P) will be much smaller than that in LLT(A). 
+ * + * + * Usage : + * \code + * #include <Eigen/OrderingMethods> + * \endcode + * + * A simple usage is as a template parameter in the sparse decomposition classes : + * + * \code + * SparseLU<matrix_type, COLAMDOrdering<int> > solver; + * \endcode + * + * \code + * SparseQR<matrix_type, COLAMDOrdering<int> > solver; + * \endcode + * + * It is possible as well to call directly a particular ordering method for your own purpose, + * \code + * AMDOrdering<int> ordering; + * PermutationMatrix<Dynamic, Dynamic, int> perm; + * SparseMatrix<double> A; + * //Fill the matrix ... + * + * ordering(A, perm); // Call AMD + * \endcode + * + * \note Some of these methods (like AMD or METIS) need the sparsity pattern + * of the input matrix to be symmetric. When the matrix is structurally unsymmetric, + * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method. + * If your matrix is already symmetric (at least in structure), you can avoid that + * by calling the method with a SelfAdjointView type. + * + * \code + * // Call the ordering on the pattern of the lower triangular matrix A + * ordering(A.selfadjointView<Lower>(), perm); + * \endcode + */ + +// IWYU pragma: begin_exports +#include "src/OrderingMethods/Amd.h" +#include "src/OrderingMethods/Ordering.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ORDERINGMETHODS_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/PaStiXSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/PaStiXSupport new file mode 100644 index 0000000000000000000000000000000000000000..dd1cfcb12de9c62126202f3d3d7c3faf0858c69b --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/PaStiXSupport @@ -0,0 +1,51 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PASTIXSUPPORT_MODULE_H +#define EIGEN_PASTIXSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include <pastix_nompi.h> +#include <pastix.h> +} + +#ifdef complex +#undef complex +#endif + +/** \ingroup Support_modules + * \defgroup PaStiXSupport_Module PaStiXSupport module + * + * This module provides an interface to the PaSTiX library. + * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver. + * It provides the following main factorization classes: + * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization. + * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization. + * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern). + * + * \code + * #include <Eigen/PaStiXSupport> + * \endcode + * + * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be + * linked to the PaSTiX library and its dependencies. This wrapper requires PaStiX version 5.x compiled without MPI + * support. The dependencies depend on how PaSTiX has been compiled. For a cmake based project, you can use our + * FindPaSTiX.cmake module to help you in this task.
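Returning to the OrderingMethods usage above: assembled into a self-contained program, the AMDOrdering snippet might look like this (illustrative only, not part of this patch; the 3x3 pattern is made up):

// Illustrative sketch: computing a fill-reducing ordering directly.
#include <Eigen/OrderingMethods>
#include <Eigen/SparseCore>
#include <iostream>

int main() {
  using namespace Eigen;

  SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 4.0; A.insert(1, 1) = 4.0; A.insert(2, 2) = 4.0;
  A.insert(0, 1) = -1.0; A.insert(1, 0) = -1.0;  // symmetric pattern, as AMD expects
  A.makeCompressed();

  AMDOrdering<int> ordering;
  PermutationMatrix<Dynamic, Dynamic, int> perm;
  ordering(A, perm);  // compute a fill-reducing permutation of A's pattern

  std::cout << "permutation: " << perm.indices().transpose() << "\n";
  return 0;
}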
+ * + */ + +// IWYU pragma: begin_exports +#include "src/PaStiXSupport/PaStiXSupport.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PASTIXSUPPORT_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/PardisoSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/PardisoSupport new file mode 100644 index 0000000000000000000000000000000000000000..4aef5fb346c3d371b4d67e35f3c928ef9c97ceea --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/PardisoSupport @@ -0,0 +1,38 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARDISOSUPPORT_MODULE_H +#define EIGEN_PARDISOSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include + +/** \ingroup Support_modules + * \defgroup PardisoSupport_Module PardisoSupport module + * + * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers. + * + * \code + * #include + * \endcode + * + * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be + * linked to the MKL library and its dependencies. See this \ref TopicUsingIntelMKL "page" for more information on + * MKL-Eigen integration. + * + */ + +// IWYU pragma: begin_exports +#include "src/PardisoSupport/PardisoSupport.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PARDISOSUPPORT_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/QR b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/QR new file mode 100644 index 0000000000000000000000000000000000000000..c38b453b076b3e2538f847fa9258c0d30a26363a --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/QR @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_QR_MODULE_H +#define EIGEN_QR_MODULE_H + +#include "Core" + +#include "Cholesky" +#include "Jacobi" +#include "Householder" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup QR_Module QR module + * + * + * + * This module provides various QR decompositions + * This module also provides some MatrixBase methods, including: + * - MatrixBase::householderQr() + * - MatrixBase::colPivHouseholderQr() + * - MatrixBase::fullPivHouseholderQr() + * + * \code + * #include + * \endcode + */ + +// IWYU pragma: begin_exports +#include "src/QR/HouseholderQR.h" +#include "src/QR/FullPivHouseholderQR.h" +#include "src/QR/ColPivHouseholderQR.h" +#include "src/QR/CompleteOrthogonalDecomposition.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/misc/lapacke_helpers.h" +#include "src/QR/HouseholderQR_LAPACKE.h" +#include "src/QR/ColPivHouseholderQR_LAPACKE.h" +#endif +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_QR_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/QtAlignedMalloc b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/QtAlignedMalloc new file mode 100644 index 0000000000000000000000000000000000000000..585f8e81ceb88161ebbd73b1989a6516b4f585af --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/QtAlignedMalloc @@ -0,0 +1,32 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_QTMALLOC_MODULE_H +#define EIGEN_QTMALLOC_MODULE_H + +#include "Core" + +#if (!EIGEN_MALLOC_ALREADY_ALIGNED) + +#include "src/Core/util/DisableStupidWarnings.h" + +void *qMalloc(std::size_t size) { return Eigen::internal::aligned_malloc(size); } + +void qFree(void *ptr) { Eigen::internal::aligned_free(ptr); } + +void *qRealloc(void *ptr, std::size_t size) { + void *newPtr = Eigen::internal::aligned_malloc(size); + std::memcpy(newPtr, ptr, size); + Eigen::internal::aligned_free(ptr); + return newPtr; +} + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif + +#endif // EIGEN_QTMALLOC_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SPQRSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SPQRSupport new file mode 100644 index 0000000000000000000000000000000000000000..c01dbe0093fa816bf30fa156f46a90b2e590449f --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SPQRSupport @@ -0,0 +1,41 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPQRSUPPORT_MODULE_H +#define EIGEN_SPQRSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "SuiteSparseQR.hpp" + +/** \ingroup Support_modules + * \defgroup SPQRSupport_Module SuiteSparseQR module + * + * This module provides an interface to the SPQR library, which is part of the suitesparse package. + * + * \code + * #include + * \endcode + * + * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be + * linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...). 
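Stepping back to the QR module documented above, a minimal least-squares sketch (illustrative only, not part of this patch; the 4x2 system is made up):

// Illustrative sketch: least squares via the QR module.
#include <Eigen/Dense>
#include <Eigen/QR>
#include <iostream>

int main() {
  // Overdetermined system: fit x in A x ~= b (4 equations, 2 unknowns).
  Eigen::MatrixXd A(4, 2);
  A << 1, 1,
       1, 2,
       1, 3,
       1, 4;
  Eigen::VectorXd b(4);
  b << 6, 5, 7, 10;

  // ColPivHouseholderQR is the rank-revealing middle ground among the
  // decompositions listed above.
  Eigen::VectorXd x = A.colPivHouseholderQr().solve(b);
  std::cout << "least-squares solution:\n" << x << "\n";
  return 0;
}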
For a cmake based project, you can use + * our FindSPQR.cmake and FindCholmod.Cmake modules + * + */ + +#include "CholmodSupport" + +// IWYU pragma: begin_exports +#include "src/SPQRSupport/SuiteSparseQRSupport.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SVD b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SVD new file mode 100644 index 0000000000000000000000000000000000000000..2a013f825d77e1bbd51349861b8d0e0d9385dbdc --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SVD @@ -0,0 +1,56 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SVD_MODULE_H +#define EIGEN_SVD_MODULE_H + +#include "QR" +#include "Householder" +#include "Jacobi" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup SVD_Module SVD module + * + * + * + * This module provides SVD decomposition for matrices (both real and complex). + * Two decomposition algorithms are provided: + * - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very + * slow for larger ones. + * - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast + * for large problems. These decompositions are accessible via the respective classes and following MatrixBase methods: + * - MatrixBase::jacobiSvd() + * - MatrixBase::bdcSvd() + * + * \code + * #include + * \endcode + */ + +// IWYU pragma: begin_exports +#include "src/misc/RealSvd2x2.h" +#include "src/SVD/UpperBidiagonalization.h" +#include "src/SVD/SVDBase.h" +#include "src/SVD/JacobiSVD.h" +#include "src/SVD/BDCSVD.h" +#ifdef EIGEN_USE_LAPACKE +#ifdef EIGEN_USE_MKL +#include "mkl_lapacke.h" +#else +#include "src/misc/lapacke.h" +#endif +#ifndef EIGEN_USE_LAPACKE_STRICT +#include "src/SVD/JacobiSVD_LAPACKE.h" +#endif +#include "src/SVD/BDCSVD_LAPACKE.h" +#endif +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SVD_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Sparse b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Sparse new file mode 100644 index 0000000000000000000000000000000000000000..4d0ee8bc4486fc805354570af7417928d85c6973 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/Sparse @@ -0,0 +1,33 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
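A sketch of the SVD module documented above, contrasting the two solvers (illustrative only, not part of this patch):

// Illustrative sketch: thin SVD and an SVD-based least-squares solve.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);

  // BDCSVD: the divide-and-conquer solver that stays fast on large problems.
  Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  std::cout << "singular values: " << svd.singularValues().transpose() << "\n";

  // Minimum-norm least-squares solve through the SVD:
  Eigen::VectorXd x = svd.solve(Eigen::VectorXd::Ones(6));
  std::cout << "x = " << x.transpose() << "\n";

  // A.jacobiSvd(flags) is the two-sided Jacobi alternative for small matrices.
  return 0;
}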
+ +#ifndef EIGEN_SPARSE_MODULE_H +#define EIGEN_SPARSE_MODULE_H + +/** \defgroup Sparse_Module Sparse meta-module + * + * Meta-module including all related modules: + * - \ref SparseCore_Module + * - \ref OrderingMethods_Module + * - \ref SparseCholesky_Module + * - \ref SparseLU_Module + * - \ref SparseQR_Module + * - \ref IterativeLinearSolvers_Module + * + \code + #include + \endcode + */ + +#include "SparseCore" +#include "OrderingMethods" +#include "SparseCholesky" +#include "SparseLU" +#include "SparseQR" +#include "IterativeLinearSolvers" + +#endif // EIGEN_SPARSE_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseCholesky b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseCholesky new file mode 100644 index 0000000000000000000000000000000000000000..6abdcd6601cc85d15fd20419bd28af2391907d0b --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseCholesky @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2013 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSECHOLESKY_MODULE_H +#define EIGEN_SPARSECHOLESKY_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** + * \defgroup SparseCholesky_Module SparseCholesky module + * + * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) + * matrices. Those decompositions are accessible via the following classes: + * - SimplicialLLt, + * - SimplicialLDLt + * + * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module. + * + * \code + * #include + * \endcode + */ + +// IWYU pragma: begin_exports +#include "src/SparseCholesky/SimplicialCholesky.h" +#include "src/SparseCholesky/SimplicialCholesky_impl.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECHOLESKY_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseCore b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseCore new file mode 100644 index 0000000000000000000000000000000000000000..56a9401af34a61f2bdfb2d646ddc71d9f2aa1b7d --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseCore @@ -0,0 +1,70 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSECORE_MODULE_H +#define EIGEN_SPARSECORE_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include +#include +#include + +/** + * \defgroup SparseCore_Module SparseCore module + * + * This module provides a sparse matrix representation, and basic associated matrix manipulations + * and operations. + * + * See the \ref TutorialSparse "Sparse tutorial" + * + * \code + * #include + * \endcode + * + * This module depends on: Core. 
+ */ + +// IWYU pragma: begin_exports +#include "src/SparseCore/SparseUtil.h" +#include "src/SparseCore/SparseMatrixBase.h" +#include "src/SparseCore/SparseAssign.h" +#include "src/SparseCore/CompressedStorage.h" +#include "src/SparseCore/AmbiVector.h" +#include "src/SparseCore/SparseCompressedBase.h" +#include "src/SparseCore/SparseMatrix.h" +#include "src/SparseCore/SparseMap.h" +#include "src/SparseCore/SparseVector.h" +#include "src/SparseCore/SparseRef.h" +#include "src/SparseCore/SparseCwiseUnaryOp.h" +#include "src/SparseCore/SparseCwiseBinaryOp.h" +#include "src/SparseCore/SparseTranspose.h" +#include "src/SparseCore/SparseBlock.h" +#include "src/SparseCore/SparseDot.h" +#include "src/SparseCore/SparseRedux.h" +#include "src/SparseCore/SparseView.h" +#include "src/SparseCore/SparseDiagonalProduct.h" +#include "src/SparseCore/ConservativeSparseSparseProduct.h" +#include "src/SparseCore/SparseSparseProductWithPruning.h" +#include "src/SparseCore/SparseProduct.h" +#include "src/SparseCore/SparseDenseProduct.h" +#include "src/SparseCore/SparseSelfAdjointView.h" +#include "src/SparseCore/SparseTriangularView.h" +#include "src/SparseCore/TriangularSolver.h" +#include "src/SparseCore/SparsePermutation.h" +#include "src/SparseCore/SparseFuzzy.h" +#include "src/SparseCore/SparseSolverBase.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECORE_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseLU b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseLU new file mode 100644 index 0000000000000000000000000000000000000000..6faf13069439eb4b744354b497f77b029bdcc971 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseLU @@ -0,0 +1,50 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSELU_MODULE_H +#define EIGEN_SPARSELU_MODULE_H + +#include "SparseCore" + +/** + * \defgroup SparseLU_Module SparseLU module + * This module defines a supernodal factorization of general sparse matrices. + * The code is fully optimized for supernode-panel updates with specialized kernels. + * Please, see the documentation of the SparseLU class for more details. 
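A sketch of SparseLU on a small non-symmetric system, with the ordering passed as the template argument described in the OrderingMethods module (illustrative only, not part of this patch):

// Illustrative sketch: supernodal sparse LU with COLAMD ordering.
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <iostream>
#include <vector>

int main() {
  const int n = 4;
  std::vector<Eigen::Triplet<double>> t = {
      {0, 0, 3.0}, {0, 2, 1.0}, {1, 1, 2.0}, {2, 0, -1.0}, {2, 2, 4.0}, {3, 3, 1.0}};
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(t.begin(), t.end());
  A.makeCompressed();  // SparseLU wants compressed (CCS) storage

  Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> lu;
  lu.analyzePattern(A);  // symbolic step: reusable across matrices with the same pattern
  lu.factorize(A);       // numeric step
  if (lu.info() != Eigen::Success) return 1;

  Eigen::VectorXd x = lu.solve(Eigen::VectorXd::Ones(n));
  std::cout << x.transpose() << "\n";
  return 0;
}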
+ */ + +// Ordering interface +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +// IWYU pragma: begin_exports +#include "src/SparseLU/SparseLU_Structs.h" +#include "src/SparseLU/SparseLU_SupernodalMatrix.h" +#include "src/SparseLU/SparseLUImpl.h" +#include "src/SparseCore/SparseColEtree.h" +#include "src/SparseLU/SparseLU_Memory.h" +#include "src/SparseLU/SparseLU_heap_relax_snode.h" +#include "src/SparseLU/SparseLU_relax_snode.h" +#include "src/SparseLU/SparseLU_pivotL.h" +#include "src/SparseLU/SparseLU_panel_dfs.h" +#include "src/SparseLU/SparseLU_kernel_bmod.h" +#include "src/SparseLU/SparseLU_panel_bmod.h" +#include "src/SparseLU/SparseLU_column_dfs.h" +#include "src/SparseLU/SparseLU_column_bmod.h" +#include "src/SparseLU/SparseLU_copy_to_ucol.h" +#include "src/SparseLU/SparseLU_pruneL.h" +#include "src/SparseLU/SparseLU_Utils.h" +#include "src/SparseLU/SparseLU.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSELU_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseQR b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseQR new file mode 100644 index 0000000000000000000000000000000000000000..b4f1cad6bbb1826977e4c0debc669e650bb435ce --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SparseQR @@ -0,0 +1,38 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEQR_MODULE_H +#define EIGEN_SPARSEQR_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup SparseQR_Module SparseQR module + * \brief Provides QR decomposition for sparse matrices + * + * This module provides a simplicial version of the left-looking Sparse QR decomposition. + * The columns of the input matrix should be reordered to limit the fill-in during the + * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end. + * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list + * of built-in and external ordering methods. + * + * \code + * #include + * \endcode + * + * + */ + +// IWYU pragma: begin_exports +#include "src/SparseCore/SparseColEtree.h" +#include "src/SparseQR/SparseQR.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdDeque b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdDeque new file mode 100644 index 0000000000000000000000000000000000000000..01e1d76f043b4cbb1e4776f8b5c11ea68894e7c0 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdDeque @@ -0,0 +1,30 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
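A sketch of the SparseQR module documented above, using COLAMD to reorder columns as recommended (illustrative only, not part of this patch; the 4x2 system is made up):

// Illustrative sketch: sparse least squares with SparseQR.
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(4, 2);
  A.insert(0, 0) = 1; A.insert(1, 0) = 1; A.insert(2, 0) = 1; A.insert(3, 0) = 1;
  A.insert(0, 1) = 1; A.insert(1, 1) = 2; A.insert(2, 1) = 3; A.insert(3, 1) = 4;
  A.makeCompressed();  // SparseQR requires compressed storage

  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> qr(A);
  if (qr.info() != Eigen::Success) return 1;

  Eigen::VectorXd b(4);
  b << 6, 5, 7, 10;
  Eigen::VectorXd x = qr.solve(b);  // least-squares solution
  std::cout << x.transpose() << "\n";
  return 0;
}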
+ +#ifndef EIGEN_STDDEQUE_MODULE_H +#define EIGEN_STDDEQUE_MODULE_H + +#include "Core" +#include + +#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && \ + (EIGEN_MAX_STATIC_ALIGN_BYTES <= 16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */ + +#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) + +#else + +// IWYU pragma: begin_exports +#include "src/StlSupport/StdDeque.h" +// IWYU pragma: end_exports + +#endif + +#endif // EIGEN_STDDEQUE_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdList b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdList new file mode 100644 index 0000000000000000000000000000000000000000..1453c9f569163bdbdf062d72f42c55a8cc50387a --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdList @@ -0,0 +1,29 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDLIST_MODULE_H +#define EIGEN_STDLIST_MODULE_H + +#include "Core" +#include + +#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && \ + (EIGEN_MAX_STATIC_ALIGN_BYTES <= 16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */ + +#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) + +#else + +// IWYU pragma: begin_exports +#include "src/StlSupport/StdList.h" +// IWYU pragma: end_exports + +#endif + +#endif // EIGEN_STDLIST_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdVector b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdVector new file mode 100644 index 0000000000000000000000000000000000000000..711a654eaba339044faa0a1b279c003402e8906c --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/StdVector @@ -0,0 +1,30 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDVECTOR_MODULE_H +#define EIGEN_STDVECTOR_MODULE_H + +#include "Core" +#include + +#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && \ + (EIGEN_MAX_STATIC_ALIGN_BYTES <= 16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */ + +#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) + +#else + +// IWYU pragma: begin_exports +#include "src/StlSupport/StdVector.h" +// IWYU pragma: end_exports + +#endif + +#endif // EIGEN_STDVECTOR_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SuperLUSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SuperLUSupport new file mode 100644 index 0000000000000000000000000000000000000000..79e2222f40dfb4c2099b70fd64866eebd4f6f3a7 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/SuperLUSupport @@ -0,0 +1,70 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
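The StdDeque/StdList/StdVector modules above exist because fixed-size vectorizable Eigen types need over-aligned storage inside STL containers; a sketch (illustrative only, not part of this patch):

// Illustrative sketch: fixed-size vectorizable types in an STL container.
#include <Eigen/StdVector>
#include <iostream>
#include <vector>

int main() {
  // aligned_allocator guarantees the over-alignment Vector4f's SIMD packets
  // expect; mandatory before C++17 and still harmless after.
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f>> pts;
  pts.push_back(Eigen::Vector4f(1, 2, 3, 4));
  pts.push_back(Eigen::Vector4f::Ones());
  std::cout << pts[0].dot(pts[1]) << "\n";  // 10
  return 0;
}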
+ +#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H +#define EIGEN_SUPERLUSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#ifdef EMPTY +#define EIGEN_EMPTY_WAS_ALREADY_DEFINED +#endif + +typedef int int_t; +#include +#include +#include + +// slu_util.h defines a preprocessor token named EMPTY which is really polluting, +// so we remove it in favor of a SUPERLU_EMPTY token. +// If EMPTY was already defined then we don't undef it. + +#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED) +#undef EIGEN_EMPTY_WAS_ALREADY_DEFINED +#elif defined(EMPTY) +#undef EMPTY +#endif + +#define SUPERLU_EMPTY (-1) + +namespace Eigen { +struct SluMatrix; +} + +/** \ingroup Support_modules + * \defgroup SuperLUSupport_Module SuperLUSupport module + * + * This module provides an interface to the SuperLU library. + * It provides the following factorization class: + * - class SuperLU: a supernodal sequential LU factorization. + * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative + * methods). + * + * \warning This wrapper requires at least versions 4.0 of SuperLU. The 3.x versions are not supported. + * + * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined + * because it is too polluting. + * + * \code + * #include + * \endcode + * + * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be + * linked to the superlu library and its dependencies. The dependencies depend on how superlu has been compiled. For a + * cmake based project, you can use our FindSuperLU.cmake module to help you in this task. + * + */ + +// IWYU pragma: begin_exports +#include "src/SuperLUSupport/SuperLUSupport.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SUPERLUSUPPORT_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/ThreadPool b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/ThreadPool new file mode 100644 index 0000000000000000000000000000000000000000..c6e2cee5b6252ff0abad3623ec7efca341649e0b --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/ThreadPool @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_THREADPOOL_MODULE_H +#define EIGEN_THREADPOOL_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup ThreadPool_Module ThreadPool Module + * + * This module provides 2 threadpool implementations + * - a simple reference implementation + * - a faster non blocking implementation + * + * \code + * #include + * \endcode + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// There are non-parenthesized calls to "max" in the header, +// which trigger a check in test/main.h causing compilation to fail. +// We work around the check here by removing the check for max in +// the case where we have to emulate thread_local. 
+#ifdef max +#undef max +#endif +#include + +#include "src/Core/util/Meta.h" +#include "src/Core/util/MaxSizeVector.h" + +#ifndef EIGEN_MUTEX +#define EIGEN_MUTEX std::mutex +#endif +#ifndef EIGEN_MUTEX_LOCK +#define EIGEN_MUTEX_LOCK std::unique_lock +#endif +#ifndef EIGEN_CONDVAR +#define EIGEN_CONDVAR std::condition_variable +#endif + +// IWYU pragma: begin_exports +#include "src/ThreadPool/ThreadLocal.h" +#include "src/ThreadPool/ThreadYield.h" +#include "src/ThreadPool/ThreadCancel.h" +#include "src/ThreadPool/EventCount.h" +#include "src/ThreadPool/RunQueue.h" +#include "src/ThreadPool/ThreadPoolInterface.h" +#include "src/ThreadPool/ThreadEnvironment.h" +#include "src/ThreadPool/Barrier.h" +#include "src/ThreadPool/NonBlockingThreadPool.h" +#include "src/ThreadPool/CoreThreadPoolDevice.h" +// IWYU pragma: end_exports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CXX11_THREADPOOL_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/UmfPackSupport b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/UmfPackSupport new file mode 100644 index 0000000000000000000000000000000000000000..126344cba3fa3c473afc079dee3003cbb930dd33 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/UmfPackSupport @@ -0,0 +1,42 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H +#define EIGEN_UMFPACKSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +} + +/** \ingroup Support_modules + * \defgroup UmfPackSupport_Module UmfPackSupport module + * + * This module provides an interface to the UmfPack library which is part of the suitesparse package. It provides the following factorization class: + * - class UmfPackLU: a multifrontal sequential LU factorization. + * + * \code + * #include + * \endcode + * + * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be + * linked to the umfpack library and its dependencies. The dependencies depend on how umfpack has been compiled. For a + * cmake based project, you can use our FindUmfPack.cmake module to help you in this task. 
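A sketch of UmfPackLU, assuming the umfpack headers and library are actually installed and linked as the doc above requires (illustrative only, not part of this patch):

// Illustrative sketch: multifrontal LU through the UmfPackSupport wrapper.
#include <Eigen/SparseCore>
#include <Eigen/UmfPackSupport>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 2; A.insert(0, 1) = 1; A.insert(1, 1) = 3;
  A.makeCompressed();

  Eigen::UmfPackLU<Eigen::SparseMatrix<double>> lu(A);
  if (lu.info() != Eigen::Success) return 1;
  Eigen::Vector2d x = lu.solve(Eigen::Vector2d(3, 3));
  std::cout << x.transpose() << "\n";  // expect (1, 1)
  return 0;
}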
+ * + */ + +// IWYU pragma: begin_exports +#include "src/UmfPackSupport/UmfPackSupport.h" +// IWYU pragma: endexports + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_UMFPACKSUPPORT_MODULE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/AccelerateSupport/AccelerateSupport.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/AccelerateSupport/AccelerateSupport.h new file mode 100644 index 0000000000000000000000000000000000000000..09967fffd8142ad0422a99377a8258aab0bf0fe4 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/AccelerateSupport/AccelerateSupport.h @@ -0,0 +1,423 @@ +#ifndef EIGEN_ACCELERATESUPPORT_H +#define EIGEN_ACCELERATESUPPORT_H + +#include + +#include + +namespace Eigen { + +template +class AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateLLT + * \brief A direct Cholesky (LLT) factorization and solver based on Accelerate + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ additional information about the matrix structure. Default is Lower. + * + * \sa \ref TutorialSparseSolverConcept, class AccelerateLLT + */ +template +using AccelerateLLT = AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateLDLT + * \brief The default Cholesky (LDLT) factorization and solver based on Accelerate + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ additional information about the matrix structure. Default is Lower. + * + * \sa \ref TutorialSparseSolverConcept, class AccelerateLDLT + */ +template +using AccelerateLDLT = AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateLDLTUnpivoted + * \brief A direct Cholesky-like LDL^T factorization and solver based on Accelerate with only 1x1 pivots and no pivoting + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ additional information about the matrix structure. Default is Lower. + * + * \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTUnpivoted + */ +template +using AccelerateLDLTUnpivoted = AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateLDLTSBK + * \brief A direct Cholesky (LDLT) factorization and solver based on Accelerate with Supernode Bunch-Kaufman and static + * pivoting + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ additional information about the matrix structure. Default is Lower. + * + * \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTSBK + */ +template +using AccelerateLDLTSBK = AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateLDLTTPP + * \brief A direct Cholesky (LDLT) factorization and solver based on Accelerate with full threshold partial pivoting + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ additional information about the matrix structure. Default is Lower. 
+ * + * \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTTPP + */ +template +using AccelerateLDLTTPP = AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateQR + * \brief A QR factorization and solver based on Accelerate + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \sa \ref TutorialSparseSolverConcept, class AccelerateQR + */ +template +using AccelerateQR = AccelerateImpl; + +/** \ingroup AccelerateSupport_Module + * \class AccelerateCholeskyAtA + * \brief A QR factorization and solver based on Accelerate without storing Q (equivalent to A^TA = R^T R) + * + * \warning Only single and double precision real scalar types are supported by Accelerate + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \sa \ref TutorialSparseSolverConcept, class AccelerateCholeskyAtA + */ +template +using AccelerateCholeskyAtA = AccelerateImpl; + +namespace internal { +template +struct AccelFactorizationDeleter { + void operator()(T* sym) { + if (sym) { + SparseCleanup(*sym); + delete sym; + sym = nullptr; + } + } +}; + +template +struct SparseTypesTraitBase { + typedef DenseVecT AccelDenseVector; + typedef DenseMatT AccelDenseMatrix; + typedef SparseMatT AccelSparseMatrix; + + typedef SparseOpaqueSymbolicFactorization SymbolicFactorization; + typedef NumFactT NumericFactorization; + + typedef AccelFactorizationDeleter SymbolicFactorizationDeleter; + typedef AccelFactorizationDeleter NumericFactorizationDeleter; +}; + +template +struct SparseTypesTrait {}; + +template <> +struct SparseTypesTrait : SparseTypesTraitBase {}; + +template <> +struct SparseTypesTrait + : SparseTypesTraitBase { +}; + +} // end namespace internal + +template +class AccelerateImpl : public SparseSolverBase > { + protected: + using Base = SparseSolverBase; + using Base::derived; + using Base::m_isInitialized; + + public: + using Base::_solve_impl; + + typedef MatrixType_ MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; + enum { UpLo = UpLo_ }; + + using AccelDenseVector = typename internal::SparseTypesTrait::AccelDenseVector; + using AccelDenseMatrix = typename internal::SparseTypesTrait::AccelDenseMatrix; + using AccelSparseMatrix = typename internal::SparseTypesTrait::AccelSparseMatrix; + using SymbolicFactorization = typename internal::SparseTypesTrait::SymbolicFactorization; + using NumericFactorization = typename internal::SparseTypesTrait::NumericFactorization; + using SymbolicFactorizationDeleter = typename internal::SparseTypesTrait::SymbolicFactorizationDeleter; + using NumericFactorizationDeleter = typename internal::SparseTypesTrait::NumericFactorizationDeleter; + + AccelerateImpl() { + m_isInitialized = false; + + auto check_flag_set = [](int value, int flag) { return ((value & flag) == flag); }; + + if (check_flag_set(UpLo_, Symmetric)) { + m_sparseKind = SparseSymmetric; + m_triType = (UpLo_ & Lower) ? 
SparseLowerTriangle : SparseUpperTriangle; + } else if (check_flag_set(UpLo_, UnitLower)) { + m_sparseKind = SparseUnitTriangular; + m_triType = SparseLowerTriangle; + } else if (check_flag_set(UpLo_, UnitUpper)) { + m_sparseKind = SparseUnitTriangular; + m_triType = SparseUpperTriangle; + } else if (check_flag_set(UpLo_, StrictlyLower)) { + m_sparseKind = SparseTriangular; + m_triType = SparseLowerTriangle; + } else if (check_flag_set(UpLo_, StrictlyUpper)) { + m_sparseKind = SparseTriangular; + m_triType = SparseUpperTriangle; + } else if (check_flag_set(UpLo_, Lower)) { + m_sparseKind = SparseTriangular; + m_triType = SparseLowerTriangle; + } else if (check_flag_set(UpLo_, Upper)) { + m_sparseKind = SparseTriangular; + m_triType = SparseUpperTriangle; + } else { + m_sparseKind = SparseOrdinary; + m_triType = (UpLo_ & Lower) ? SparseLowerTriangle : SparseUpperTriangle; + } + + m_order = SparseOrderDefault; + } + + explicit AccelerateImpl(const MatrixType& matrix) : AccelerateImpl() { compute(matrix); } + + ~AccelerateImpl() {} + + inline Index cols() const { return m_nCols; } + inline Index rows() const { return m_nRows; } + + ComputationInfo info() const { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + void analyzePattern(const MatrixType& matrix); + + void factorize(const MatrixType& matrix); + + void compute(const MatrixType& matrix); + + template + void _solve_impl(const MatrixBase& b, MatrixBase& dest) const; + + /** Sets the ordering algorithm to use. */ + void setOrder(SparseOrder_t order) { m_order = order; } + + private: + template + void buildAccelSparseMatrix(const SparseMatrix& a, AccelSparseMatrix& A, std::vector& columnStarts) { + const Index nColumnsStarts = a.cols() + 1; + + columnStarts.resize(nColumnsStarts); + + for (Index i = 0; i < nColumnsStarts; i++) columnStarts[i] = a.outerIndexPtr()[i]; + + SparseAttributes_t attributes{}; + attributes.transpose = false; + attributes.triangle = m_triType; + attributes.kind = m_sparseKind; + + SparseMatrixStructure structure{}; + structure.attributes = attributes; + structure.rowCount = static_cast(a.rows()); + structure.columnCount = static_cast(a.cols()); + structure.blockSize = 1; + structure.columnStarts = columnStarts.data(); + structure.rowIndices = const_cast(a.innerIndexPtr()); + + A.structure = structure; + A.data = const_cast(a.valuePtr()); + } + + void doAnalysis(AccelSparseMatrix& A) { + m_numericFactorization.reset(nullptr); + + SparseSymbolicFactorOptions opts{}; + opts.control = SparseDefaultControl; + opts.orderMethod = m_order; + opts.order = nullptr; + opts.ignoreRowsAndColumns = nullptr; + opts.malloc = malloc; + opts.free = free; + opts.reportError = nullptr; + + m_symbolicFactorization.reset(new SymbolicFactorization(SparseFactor(Solver_, A.structure, opts))); + + SparseStatus_t status = m_symbolicFactorization->status; + + updateInfoStatus(status); + + if (status != SparseStatusOK) m_symbolicFactorization.reset(nullptr); + } + + void doFactorization(AccelSparseMatrix& A) { + SparseStatus_t status = SparseStatusReleased; + + if (m_symbolicFactorization) { + m_numericFactorization.reset(new NumericFactorization(SparseFactor(*m_symbolicFactorization, A))); + + status = m_numericFactorization->status; + + if (status != SparseStatusOK) m_numericFactorization.reset(nullptr); + } + + updateInfoStatus(status); + } + + protected: + void updateInfoStatus(SparseStatus_t status) const { + switch (status) { + case SparseStatusOK: + m_info = Success; + break; + case 
SparseFactorizationFailed: + case SparseMatrixIsSingular: + m_info = NumericalIssue; + break; + case SparseInternalError: + case SparseParameterError: + case SparseStatusReleased: + default: + m_info = InvalidInput; + break; + } + } + + mutable ComputationInfo m_info; + Index m_nRows, m_nCols; + std::unique_ptr m_symbolicFactorization; + std::unique_ptr m_numericFactorization; + SparseKind_t m_sparseKind; + SparseTriangle_t m_triType; + SparseOrder_t m_order; +}; + +/** Computes the symbolic and numeric decomposition of matrix \a a */ +template +void AccelerateImpl::compute(const MatrixType& a) { + if (EnforceSquare_) eigen_assert(a.rows() == a.cols()); + + m_nRows = a.rows(); + m_nCols = a.cols(); + + AccelSparseMatrix A{}; + std::vector columnStarts; + + buildAccelSparseMatrix(a, A, columnStarts); + + doAnalysis(A); + + if (m_symbolicFactorization) doFactorization(A); + + m_isInitialized = true; +} + +/** Performs a symbolic decomposition on the sparsity pattern of matrix \a a. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ +template +void AccelerateImpl::analyzePattern(const MatrixType& a) { + if (EnforceSquare_) eigen_assert(a.rows() == a.cols()); + + m_nRows = a.rows(); + m_nCols = a.cols(); + + AccelSparseMatrix A{}; + std::vector columnStarts; + + buildAccelSparseMatrix(a, A, columnStarts); + + doAnalysis(A); + + m_isInitialized = true; +} + +/** Performs a numeric decomposition of matrix \a a. + * + * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been + * performed. + * + * \sa analyzePattern() + */ +template +void AccelerateImpl::factorize(const MatrixType& a) { + eigen_assert(m_symbolicFactorization && "You must first call analyzePattern()"); + eigen_assert(m_nRows == a.rows() && m_nCols == a.cols()); + + if (EnforceSquare_) eigen_assert(a.rows() == a.cols()); + + AccelSparseMatrix A{}; + std::vector columnStarts; + + buildAccelSparseMatrix(a, A, columnStarts); + + doFactorization(A); +} + +template +template +void AccelerateImpl::_solve_impl(const MatrixBase& b, + MatrixBase& x) const { + if (!m_numericFactorization) { + m_info = InvalidInput; + return; + } + + eigen_assert(m_nRows == b.rows()); + eigen_assert(((b.cols() == 1) || b.outerStride() == b.rows())); + + SparseStatus_t status = SparseStatusOK; + + Scalar* b_ptr = const_cast(b.derived().data()); + Scalar* x_ptr = const_cast(x.derived().data()); + + AccelDenseMatrix xmat{}; + xmat.attributes = SparseAttributes_t(); + xmat.columnCount = static_cast(x.cols()); + xmat.rowCount = static_cast(x.rows()); + xmat.columnStride = xmat.rowCount; + xmat.data = x_ptr; + + AccelDenseMatrix bmat{}; + bmat.attributes = SparseAttributes_t(); + bmat.columnCount = static_cast(b.cols()); + bmat.rowCount = static_cast(b.rows()); + bmat.columnStride = bmat.rowCount; + bmat.data = b_ptr; + + SparseSolve(*m_numericFactorization, bmat, xmat); + + updateInfoStatus(status); +} + +} // end namespace Eigen + +#endif // EIGEN_ACCELERATESUPPORT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/AccelerateSupport/InternalHeaderCheck.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/AccelerateSupport/InternalHeaderCheck.h new file mode 100644 index 0000000000000000000000000000000000000000..69bcff50a65b7d856a69b859ba9c23a36771be27 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/AccelerateSupport/InternalHeaderCheck.h @@ -0,0 +1,3 @@ +#ifndef 
EIGEN_ACCELERATESUPPORT_MODULE_H
+#error "Please include Eigen/AccelerateSupport instead of including headers inside the src directory directly."
+#endif
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/InternalHeaderCheck.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/InternalHeaderCheck.h
new file mode 100644
index 0000000000000000000000000000000000000000..5de2b219b3f66eaa59773f6d05212c930143856a
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/InternalHeaderCheck.h
@@ -0,0 +1,3 @@
+#ifndef EIGEN_CHOLESKY_MODULE_H
+#error "Please include Eigen/Cholesky instead of including headers inside the src directory directly."
+#endif
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LDLT.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LDLT.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d52ab20f6430e99caed15cc487bd4ef7f7ebd86
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LDLT.h
@@ -0,0 +1,649 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LDLT_H
+#define EIGEN_LDLT_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+template <typename MatrixType_, int UpLo_>
+struct traits<LDLT<MatrixType_, UpLo_> > : traits<MatrixType_> {
+  typedef MatrixXpr XprKind;
+  typedef SolverStorage StorageKind;
+  typedef int StorageIndex;
+  enum { Flags = 0 };
+};
+
+template <typename MatrixType, int UpLo>
+struct LDLT_Traits;
+
+// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
+enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
+}  // namespace internal
+
+/** \ingroup Cholesky_Module
+ *
+ * \class LDLT
+ *
+ * \brief Robust Cholesky decomposition of a matrix with pivoting
+ *
+ * \tparam MatrixType_ the type of the matrix of which to compute the LDL^T Cholesky decomposition
+ * \tparam UpLo_ the triangular part that will be used for the decomposition: Lower (default) or Upper.
+ *               The other triangular part won't be read.
+ *
+ * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
+ * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L
+ * is lower triangular with a unit diagonal and D is a diagonal matrix.
+ *
+ * The decomposition uses pivoting to ensure stability, so that D will have
+ * zeros in the bottom-right (n - rank(A)) submatrix. Avoiding the square root
+ * on D also stabilizes the computation.
+ *
+ * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
+ * decomposition to determine whether a system of equations has a solution.
+ *
+ * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
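+ *
+ * A minimal usage sketch (assuming a selfadjoint \c A stored in its lower triangular part):
+ * \code
+ * Eigen::MatrixXd A(2, 2);
+ * A << 4, 1,
+ *      1, 3;
+ * Eigen::VectorXd b(2);
+ * b << 1, 2;
+ * Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
+ * Eigen::VectorXd x;
+ * if (ldlt.info() == Eigen::Success) x = ldlt.solve(b);
+ * \endcode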
+ * + * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT + */ +template +class LDLT : public SolverBase > { + public: + typedef MatrixType_ MatrixType; + typedef SolverBase Base; + friend class SolverBase; + + EIGEN_GENERIC_PUBLIC_INTERFACE(LDLT) + enum { + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + UpLo = UpLo_ + }; + typedef Matrix TmpMatrixType; + + typedef Transpositions TranspositionType; + typedef PermutationMatrix PermutationType; + + typedef internal::LDLT_Traits Traits; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LDLT::compute(const MatrixType&). + */ + LDLT() : m_matrix(), m_transpositions(), m_sign(internal::ZeroSign), m_isInitialized(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa LDLT() + */ + explicit LDLT(Index size) + : m_matrix(size, size), + m_transpositions(size), + m_temporary(size), + m_sign(internal::ZeroSign), + m_isInitialized(false) {} + + /** \brief Constructor with decomposition + * + * This calculates the decomposition for the input \a matrix. + * + * \sa LDLT(Index size) + */ + template + explicit LDLT(const EigenBase& matrix) + : m_matrix(matrix.rows(), matrix.cols()), + m_transpositions(matrix.rows()), + m_temporary(matrix.rows()), + m_sign(internal::ZeroSign), + m_isInitialized(false) { + compute(matrix.derived()); + } + + /** \brief Constructs a LDLT factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c + * MatrixType is a Eigen::Ref. + * + * \sa LDLT(const EigenBase&) + */ + template + explicit LDLT(EigenBase& matrix) + : m_matrix(matrix.derived()), + m_transpositions(matrix.rows()), + m_temporary(matrix.rows()), + m_sign(internal::ZeroSign), + m_isInitialized(false) { + compute(matrix.derived()); + } + + /** Clear any existing decomposition + * \sa rankUpdate(w,sigma) + */ + void setZero() { m_isInitialized = false; } + + /** \returns a view of the upper triangular matrix U */ + inline typename Traits::MatrixU matrixU() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Traits::getU(m_matrix); + } + + /** \returns a view of the lower triangular matrix L */ + inline typename Traits::MatrixL matrixL() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Traits::getL(m_matrix); + } + + /** \returns the permutation matrix P as a transposition sequence. 
+ */ + inline const TranspositionType& transpositionsP() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_transpositions; + } + + /** \returns the coefficients of the diagonal matrix D */ + inline Diagonal vectorD() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_matrix.diagonal(); + } + + /** \returns true if the matrix is positive (semidefinite) */ + inline bool isPositive() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; + } + + /** \returns true if the matrix is negative (semidefinite) */ + inline bool isNegative(void) const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; + } + +#ifdef EIGEN_PARSED_BY_DOXYGEN + /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * This function also supports in-place solves using the syntax x = decompositionObject.solve(x) . + * + * \note_about_checking_solutions + * + * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ + * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, + * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then + * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the + * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function + * computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular. + * + * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt() + */ + template + inline const Solve solve(const MatrixBase& b) const; +#endif + + template + bool solveInPlace(MatrixBase& bAndX) const; + + template + LDLT& compute(const EigenBase& matrix); + + /** \returns an estimate of the reciprocal condition number of the matrix of + * which \c *this is the LDLT decomposition. + */ + RealScalar rcond() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return internal::rcond_estimate_helper(m_l1_norm, *this); + } + + template + LDLT& rankUpdate(const MatrixBase& w, const RealScalar& alpha = 1); + + /** \returns the internal LDLT decomposition matrix + * + * TODO: document the storage layout + */ + inline const MatrixType& matrixLDLT() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_matrix; + } + + MatrixType reconstructedMatrix() const; + + /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix + * is self-adjoint. + * + * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: + * \code x = decomposition.adjoint().solve(b) \endcode + */ + const LDLT& adjoint() const { return *this; } + + EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the factorization failed because of a zero pivot. 
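+   * For instance, a cautious caller can check the status before trusting a solve (sketch):
+   * \code
+   * Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
+   * if (ldlt.info() != Eigen::Success) {
+   *   // a zero pivot was hit; consider a rank-revealing decomposition instead
+   * }
+   * \endcode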
+ */ + ComputationInfo info() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_info; + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + void _solve_impl(const RhsType& rhs, DstType& dst) const; + + template + void _solve_impl_transposed(const RhsType& rhs, DstType& dst) const; +#endif + + protected: + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + + /** \internal + * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. + * The strict upper part is used during the decomposition, the strict lower + * part correspond to the coefficients of L (its diagonal is equal to 1 and + * is not stored), and the diagonal entries correspond to D. + */ + MatrixType m_matrix; + RealScalar m_l1_norm; + TranspositionType m_transpositions; + TmpMatrixType m_temporary; + internal::SignMatrix m_sign; + bool m_isInitialized; + ComputationInfo m_info; +}; + +namespace internal { + +template +struct ldlt_inplace; + +template <> +struct ldlt_inplace { + template + static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { + using std::abs; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename TranspositionType::StorageIndex IndexType; + eigen_assert(mat.rows() == mat.cols()); + const Index size = mat.rows(); + bool found_zero_pivot = false; + bool ret = true; + + if (size <= 1) { + transpositions.setIdentity(); + if (size == 0) + sign = ZeroSign; + else if (numext::real(mat.coeff(0, 0)) > static_cast(0)) + sign = PositiveSemiDef; + else if (numext::real(mat.coeff(0, 0)) < static_cast(0)) + sign = NegativeSemiDef; + else + sign = ZeroSign; + return true; + } + + for (Index k = 0; k < size; ++k) { + // Find largest diagonal element + Index index_of_biggest_in_corner; + mat.diagonal().tail(size - k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); + index_of_biggest_in_corner += k; + + transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner); + if (k != index_of_biggest_in_corner) { + // apply the transposition while taking care to consider only + // the lower triangular part + Index s = size - index_of_biggest_in_corner - 1; // trailing size after the biggest element + mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); + mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); + std::swap(mat.coeffRef(k, k), mat.coeffRef(index_of_biggest_in_corner, index_of_biggest_in_corner)); + for (Index i = k + 1; i < index_of_biggest_in_corner; ++i) { + Scalar tmp = mat.coeffRef(i, k); + mat.coeffRef(i, k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner, i)); + mat.coeffRef(index_of_biggest_in_corner, i) = numext::conj(tmp); + } + if (NumTraits::IsComplex) + mat.coeffRef(index_of_biggest_in_corner, k) = numext::conj(mat.coeff(index_of_biggest_in_corner, k)); + } + + // partition the matrix: + // A00 | - | - + // lu = A10 | A11 | - + // A20 | A21 | A22 + Index rs = size - k - 1; + Block A21(mat, k + 1, k, rs, 1); + Block A10(mat, k, 0, 1, k); + Block A20(mat, k + 1, 0, rs, k); + + if (k > 0) { + temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); + mat.coeffRef(k, k) -= (A10 * temp.head(k)).value(); + if (rs > 0) A21.noalias() -= A20 * temp.head(k); + } + + // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot + // was smaller than the cutoff value. 
However, since LDLT is not rank-revealing + // we should only make sure that we do not introduce INF or NaN values. + // Remark that LAPACK also uses 0 as the cutoff value. + RealScalar realAkk = numext::real(mat.coeffRef(k, k)); + bool pivot_is_valid = (abs(realAkk) > RealScalar(0)); + + if (k == 0 && !pivot_is_valid) { + // The entire diagonal is zero, there is nothing more to do + // except filling the transpositions, and checking whether the matrix is zero. + sign = ZeroSign; + for (Index j = 0; j < size; ++j) { + transpositions.coeffRef(j) = IndexType(j); + ret = ret && (mat.col(j).tail(size - j - 1).array() == Scalar(0)).all(); + } + return ret; + } + + if ((rs > 0) && pivot_is_valid) + A21 /= realAkk; + else if (rs > 0) + ret = ret && (A21.array() == Scalar(0)).all(); + + if (found_zero_pivot && pivot_is_valid) + ret = false; // factorization failed + else if (!pivot_is_valid) + found_zero_pivot = true; + + if (sign == PositiveSemiDef) { + if (realAkk < static_cast(0)) sign = Indefinite; + } else if (sign == NegativeSemiDef) { + if (realAkk > static_cast(0)) sign = Indefinite; + } else if (sign == ZeroSign) { + if (realAkk > static_cast(0)) + sign = PositiveSemiDef; + else if (realAkk < static_cast(0)) + sign = NegativeSemiDef; + } + } + + return ret; + } + + // Reference for the algorithm: Davis and Hager, "Multiple Rank + // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) + // Trivial rearrangements of their computations (Timothy E. Holy) + // allow their algorithm to work for rank-1 updates even if the + // original matrix is not of full rank. + // Here only rank-1 updates are implemented, to reduce the + // requirement for intermediate storage and improve accuracy + template + static bool updateInPlace(MatrixType& mat, MatrixBase& w, + const typename MatrixType::RealScalar& sigma = 1) { + using numext::isfinite; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + const Index size = mat.rows(); + eigen_assert(mat.cols() == size && w.size() == size); + + RealScalar alpha = 1; + + // Apply the update + for (Index j = 0; j < size; j++) { + // Check for termination due to an original decomposition of low-rank + if (!(isfinite)(alpha)) break; + + // Update the diagonal terms + RealScalar dj = numext::real(mat.coeff(j, j)); + Scalar wj = w.coeff(j); + RealScalar swj2 = sigma * numext::abs2(wj); + RealScalar gamma = dj * alpha + swj2; + + mat.coeffRef(j, j) += swj2 / alpha; + alpha += swj2 / dj; + + // Update the terms of L + Index rs = size - j - 1; + w.tail(rs) -= wj * mat.col(j).tail(rs); + if (!numext::is_exactly_zero(gamma)) mat.col(j).tail(rs) += (sigma * numext::conj(wj) / gamma) * w.tail(rs); + } + return true; + } + + template + static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, + const typename MatrixType::RealScalar& sigma = 1) { + // Apply the permutation to the input w + tmp = transpositions * w; + + return ldlt_inplace::updateInPlace(mat, tmp, sigma); + } +}; + +template <> +struct ldlt_inplace { + template + static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, + SignMatrix& sign) { + Transpose matt(mat); + return ldlt_inplace::unblocked(matt, transpositions, temp, sign); + } + + template + static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, + const typename MatrixType::RealScalar& sigma = 1) { + Transpose matt(mat); + return 
ldlt_inplace::update(matt, transpositions, tmp, w.conjugate(), sigma); + } +}; + +template +struct LDLT_Traits { + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } +}; + +template +struct LDLT_Traits { + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } + static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } +}; + +} // end namespace internal + +/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix + */ +template +template +LDLT& LDLT::compute(const EigenBase& a) { + eigen_assert(a.rows() == a.cols()); + const Index size = a.rows(); + + m_matrix = a.derived(); + + // Compute matrix L1 norm = max abs column sum. + m_l1_norm = RealScalar(0); + // TODO move this code to SelfAdjointView + for (Index col = 0; col < size; ++col) { + RealScalar abs_col_sum; + if (UpLo_ == Lower) + abs_col_sum = + m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); + else + abs_col_sum = + m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); + if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; + } + + m_transpositions.resize(size); + m_isInitialized = false; + m_temporary.resize(size); + m_sign = internal::ZeroSign; + + m_info = internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success + : NumericalIssue; + + m_isInitialized = true; + return *this; +} + +/** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. + * \param w a vector to be incorporated into the decomposition. + * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column + * vectors. Optional; default value is +1. \sa setZero() + */ +template +template +LDLT& LDLT::rankUpdate( + const MatrixBase& w, const typename LDLT::RealScalar& sigma) { + typedef typename TranspositionType::StorageIndex IndexType; + const Index size = w.rows(); + if (m_isInitialized) { + eigen_assert(m_matrix.rows() == size); + } else { + m_matrix.resize(size, size); + m_matrix.setZero(); + m_transpositions.resize(size); + for (Index i = 0; i < size; i++) m_transpositions.coeffRef(i) = IndexType(i); + m_temporary.resize(size); + m_sign = sigma >= 0 ? 
internal::PositiveSemiDef : internal::NegativeSemiDef; + m_isInitialized = true; + } + + internal::ldlt_inplace::update(m_matrix, m_transpositions, m_temporary, w, sigma); + + return *this; +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void LDLT::_solve_impl(const RhsType& rhs, DstType& dst) const { + _solve_impl_transposed(rhs, dst); +} + +template +template +void LDLT::_solve_impl_transposed(const RhsType& rhs, DstType& dst) const { + // dst = P b + dst = m_transpositions * rhs; + + // dst = L^-1 (P b) + // dst = L^-*T (P b) + matrixL().template conjugateIf().solveInPlace(dst); + + // dst = D^-* (L^-1 P b) + // dst = D^-1 (L^-*T P b) + // more precisely, use pseudo-inverse of D (see bug 241) + using std::abs; + const typename Diagonal::RealReturnType vecD(vectorD()); + // In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min()) + // and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS: + // RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits::epsilon(),RealScalar(1) + // / NumTraits::highest()); However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the + // highest diagonal element is not well justified and leads to numerical issues in some cases. Moreover, Lapack's + // xSYTRS routines use 0 for the tolerance. Using numeric_limits::min() gives us more robustness to denormals. + RealScalar tolerance = (std::numeric_limits::min)(); + for (Index i = 0; i < vecD.size(); ++i) { + if (abs(vecD(i)) > tolerance) + dst.row(i) /= vecD(i); + else + dst.row(i).setZero(); + } + + // dst = L^-* (D^-* L^-1 P b) + // dst = L^-T (D^-1 L^-*T P b) + matrixL().transpose().template conjugateIf().solveInPlace(dst); + + // dst = P^T (L^-* D^-* L^-1 P b) = A^-1 b + // dst = P^-T (L^-T D^-1 L^-*T P b) = A^-1 b + dst = m_transpositions.transpose() * dst; +} +#endif + +/** \internal use x = ldlt_object.solve(x); + * + * This is the \em in-place version of solve(). + * + * \param bAndX represents both the right-hand side matrix b and result x. + * + * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. + * + * This version avoids a copy when the right hand side matrix b is not + * needed anymore. + * + * \sa LDLT::solve(), MatrixBase::ldlt() + */ +template +template +bool LDLT::solveInPlace(MatrixBase& bAndX) const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_matrix.rows() == bAndX.rows()); + + bAndX = this->solve(bAndX); + + return true; +} + +/** \returns the matrix represented by the decomposition, + * i.e., it returns the product: P^T L D L^* P. + * This function is provided for debug purpose. 
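+ * For example, the factorization error can be checked with a sketch like
+ * \code
+ * double err = (A - ldlt.reconstructedMatrix()).norm();  // near zero on success
+ * \endcode
+ * where \c A and \c ldlt denote the original matrix and its decomposition.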
*/ +template +MatrixType LDLT::reconstructedMatrix() const { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + const Index size = m_matrix.rows(); + MatrixType res(size, size); + + // P + res.setIdentity(); + res = transpositionsP() * res; + // L^* P + res = matrixU() * res; + // D(L^*P) + res = vectorD().real().asDiagonal() * res; + // L(DL^*P) + res = matrixL() * res; + // P^T (LDL^*P) + res = transpositionsP().transpose() * res; + + return res; +} + +/** \cholesky_module + * \returns the Cholesky decomposition with full pivoting without square root of \c *this + * \sa MatrixBase::ldlt() + */ +template +inline const LDLT::PlainObject, UpLo> +SelfAdjointView::ldlt() const { + return LDLT(m_matrix); +} + +/** \cholesky_module + * \returns the Cholesky decomposition with full pivoting without square root of \c *this + * \sa SelfAdjointView::ldlt() + */ +template +inline const LDLT::PlainObject> MatrixBase::ldlt() const { + return LDLT(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_LDLT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LLT.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LLT.h new file mode 100644 index 0000000000000000000000000000000000000000..01b44769a2623c126096e40e25f8006ec451628c --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LLT.h @@ -0,0 +1,514 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LLT_H +#define EIGEN_LLT_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct traits > : traits { + typedef MatrixXpr XprKind; + typedef SolverStorage StorageKind; + typedef int StorageIndex; + enum { Flags = 0 }; +}; + +template +struct LLT_Traits; +} // namespace internal + +/** \ingroup Cholesky_Module + * + * \class LLT + * + * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features + * + * \tparam MatrixType_ the type of the matrix of which we are computing the LL^T Cholesky decomposition + * \tparam UpLo_ the triangular part that will be used for the decomposition: Lower (default) or Upper. + * The other triangular part won't be read. + * + * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite + * matrix A such that A = LL^* = U^*U, where L is lower triangular. + * + * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, + * for that purpose, we recommend the Cholesky decomposition without square root which is more stable + * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other + * situations like generalised eigen problems with hermitian matrices. + * + * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive + * definite matrices, use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine + * whether a system of equations has a solution. 
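+ *
+ * A common idiom, however, is to use the factorization status as a numerical
+ * positive-definiteness test (sketch):
+ * \code
+ * Eigen::LLT<Eigen::MatrixXd> llt(A);
+ * bool numericallyPD = (llt.info() == Eigen::Success);
+ * \endcode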
+ * + * Example: \include LLT_example.cpp + * Output: \verbinclude LLT_example.out + * + * \b Performance: for best performance, it is recommended to use a column-major storage format + * with the Lower triangular part (the default), or, equivalently, a row-major storage format + * with the Upper triangular part. Otherwise, you might get a 20% slowdown for the full factorization + * step, and rank-updates can be up to 3 times slower. + * + * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. + * + * Note that during the decomposition, only the lower (or upper, as defined by UpLo_) triangular part of A is + * considered. Therefore, the strict lower part does not have to store correct values. + * + * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT + */ +template +class LLT : public SolverBase > { + public: + typedef MatrixType_ MatrixType; + typedef SolverBase Base; + friend class SolverBase; + + EIGEN_GENERIC_PUBLIC_INTERFACE(LLT) + enum { MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; + + enum { PacketSize = internal::packet_traits::size, AlignmentMask = int(PacketSize) - 1, UpLo = UpLo_ }; + + typedef internal::LLT_Traits Traits; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LLT::compute(const MatrixType&). + */ + LLT() : m_matrix(), m_isInitialized(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa LLT() + */ + explicit LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {} + + template + explicit LLT(const EigenBase& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_isInitialized(false) { + compute(matrix.derived()); + } + + /** \brief Constructs a LLT factorization from a given matrix + * + * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when + * \c MatrixType is a Eigen::Ref. + * + * \sa LLT(const EigenBase&) + */ + template + explicit LLT(EigenBase& matrix) : m_matrix(matrix.derived()), m_isInitialized(false) { + compute(matrix.derived()); + } + + /** \returns a view of the upper triangular matrix U */ + inline typename Traits::MatrixU matrixU() const { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return Traits::getU(m_matrix); + } + + /** \returns a view of the lower triangular matrix L */ + inline typename Traits::MatrixL matrixL() const { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return Traits::getL(m_matrix); + } + +#ifdef EIGEN_PARSED_BY_DOXYGEN + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * Since this LLT class assumes anyway that the matrix A is invertible, the solution + * theoretically exists and is unique regardless of b. + * + * Example: \include LLT_solve.cpp + * Output: \verbinclude LLT_solve.out + * + * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt() + */ + template + inline const Solve solve(const MatrixBase& b) const; +#endif + + template + void solveInPlace(const MatrixBase& bAndX) const; + + template + LLT& compute(const EigenBase& matrix); + + /** \returns an estimate of the reciprocal condition number of the matrix of + * which \c *this is the Cholesky decomposition. 
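+   *
+   * For instance, a small value can flag ill-conditioned systems
+   * (sketch; the threshold is application-dependent):
+   * \code
+   * if (llt.rcond() < 1e-12) {
+   *   // solutions may be inaccurate
+   * }
+   * \endcode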
+   */
+  RealScalar rcond() const {
+    eigen_assert(m_isInitialized && "LLT is not initialized.");
+    eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative");
+    return internal::rcond_estimate_helper(m_l1_norm, *this);
+  }
+
+  /** \returns the LLT decomposition matrix
+   *
+   * TODO: document the storage layout
+   */
+  inline const MatrixType& matrixLLT() const {
+    eigen_assert(m_isInitialized && "LLT is not initialized.");
+    return m_matrix;
+  }
+
+  MatrixType reconstructedMatrix() const;
+
+  /** \brief Reports whether previous computation was successful.
+   *
+   * \returns \c Success if computation was successful,
+   *          \c NumericalIssue if the matrix appears not to be positive definite.
+   */
+  ComputationInfo info() const {
+    eigen_assert(m_isInitialized && "LLT is not initialized.");
+    return m_info;
+  }
+
+  /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix
+   * is self-adjoint.
+   *
+   * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
+   * \code x = decomposition.adjoint().solve(b) \endcode
+   */
+  const LLT& adjoint() const EIGEN_NOEXCEPT { return *this; }
+
+  inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+  inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
+
+  template <typename VectorType>
+  LLT& rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  template <typename RhsType, typename DstType>
+  void _solve_impl(const RhsType& rhs, DstType& dst) const;
+
+  template <bool Conjugate, typename RhsType, typename DstType>
+  void _solve_impl_transposed(const RhsType& rhs, DstType& dst) const;
+#endif
+
+ protected:
+  EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
+
+  /** \internal
+   * Used to compute and store L
+   * The strict upper part is not used and even not initialized.
+   */
+  MatrixType m_matrix;
+  RealScalar m_l1_norm;
+  bool m_isInitialized;
+  ComputationInfo m_info;
+};
+
+namespace internal {
+
+template <typename Scalar, int UpLo>
+struct llt_inplace;
+
+template <typename MatrixType, typename VectorType>
+static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec,
+                                   const typename MatrixType::RealScalar& sigma) {
+  using std::sqrt;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef typename MatrixType::ColXpr ColXpr;
+  typedef internal::remove_all_t<ColXpr> ColXprCleaned;
+  typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
+  typedef Matrix<Scalar, Dynamic, 1> TempVectorType;
+  typedef typename TempVectorType::SegmentReturnType TempVecSegment;
+
+  Index n = mat.cols();
+  eigen_assert(mat.rows() == n && vec.size() == n);
+
+  TempVectorType temp;
+
+  if (sigma > 0) {
+    // This version is based on Givens rotations.
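+    // Conceptually, for sigma > 0 the update factors the bordered product
+    //   A + sigma * v * v^* = [L  sqrt(sigma)*v] * [L  sqrt(sigma)*v]^*
+    // and the rotations below annihilate the appended column again, leaving
+    // a new lower-triangular factor.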
+ // It is faster than the other one below, but only works for updates, + // i.e., for sigma > 0 + temp = sqrt(sigma) * vec; + + for (Index i = 0; i < n; ++i) { + JacobiRotation g; + g.makeGivens(mat(i, i), -temp(i), &mat(i, i)); + + Index rs = n - i - 1; + if (rs > 0) { + ColXprSegment x(mat.col(i).tail(rs)); + TempVecSegment y(temp.tail(rs)); + apply_rotation_in_the_plane(x, y, g); + } + } + } else { + temp = vec; + RealScalar beta = 1; + for (Index j = 0; j < n; ++j) { + RealScalar Ljj = numext::real(mat.coeff(j, j)); + RealScalar dj = numext::abs2(Ljj); + Scalar wj = temp.coeff(j); + RealScalar swj2 = sigma * numext::abs2(wj); + RealScalar gamma = dj * beta + swj2; + + RealScalar x = dj + swj2 / beta; + if (x <= RealScalar(0)) return j; + RealScalar nLjj = sqrt(x); + mat.coeffRef(j, j) = nLjj; + beta += swj2 / dj; + + // Update the terms of L + Index rs = n - j - 1; + if (rs) { + temp.tail(rs) -= (wj / Ljj) * mat.col(j).tail(rs); + if (!numext::is_exactly_zero(gamma)) + mat.col(j).tail(rs) = + (nLjj / Ljj) * mat.col(j).tail(rs) + (nLjj * sigma * numext::conj(wj) / gamma) * temp.tail(rs); + } + } + } + return -1; +} + +template +struct llt_inplace { + typedef typename NumTraits::Real RealScalar; + template + static Index unblocked(MatrixType& mat) { + using std::sqrt; + + eigen_assert(mat.rows() == mat.cols()); + const Index size = mat.rows(); + for (Index k = 0; k < size; ++k) { + Index rs = size - k - 1; // remaining size + + Block A21(mat, k + 1, k, rs, 1); + Block A10(mat, k, 0, 1, k); + Block A20(mat, k + 1, 0, rs, k); + + RealScalar x = numext::real(mat.coeff(k, k)); + if (k > 0) x -= A10.squaredNorm(); + if (x <= RealScalar(0)) return k; + mat.coeffRef(k, k) = x = sqrt(x); + if (k > 0 && rs > 0) A21.noalias() -= A20 * A10.adjoint(); + if (rs > 0) A21 /= x; + } + return -1; + } + + template + static Index blocked(MatrixType& m) { + eigen_assert(m.rows() == m.cols()); + Index size = m.rows(); + if (size < 32) return unblocked(m); + + Index blockSize = size / 8; + blockSize = (blockSize / 16) * 16; + blockSize = (std::min)((std::max)(blockSize, Index(8)), Index(128)); + + for (Index k = 0; k < size; k += blockSize) { + // partition the matrix: + // A00 | - | - + // lu = A10 | A11 | - + // A20 | A21 | A22 + Index bs = (std::min)(blockSize, size - k); + Index rs = size - k - bs; + Block A11(m, k, k, bs, bs); + Block A21(m, k + bs, k, rs, bs); + Block A22(m, k + bs, k + bs, rs, rs); + + Index ret; + if ((ret = unblocked(A11)) >= 0) return k + ret; + if (rs > 0) A11.adjoint().template triangularView().template solveInPlace(A21); + if (rs > 0) + A22.template selfadjointView().rankUpdate(A21, + typename NumTraits::Literal(-1)); // bottleneck + } + return -1; + } + + template + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { + return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); + } +}; + +template +struct llt_inplace { + typedef typename NumTraits::Real RealScalar; + + template + static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) { + Transpose matt(mat); + return llt_inplace::unblocked(matt); + } + template + static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat) { + Transpose matt(mat); + return llt_inplace::blocked(matt); + } + template + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { + Transpose matt(mat); + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); + } +}; + +template +struct LLT_Traits { + typedef const TriangularView MatrixL; + typedef const 
TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
+  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
+  static bool inplace_decomposition(MatrixType& m) {
+    return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m) == -1;
+  }
+};
+
+template <typename MatrixType>
+struct LLT_Traits<MatrixType, Upper> {
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL;
+  typedef const TriangularView<const MatrixType, Upper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }
+  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }
+  static bool inplace_decomposition(MatrixType& m) {
+    return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m) == -1;
+  }
+};
+
+}  // end namespace internal
+
+/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix
+ *
+ * \returns a reference to *this
+ *
+ * Example: \include TutorialLinAlgComputeTwice.cpp
+ * Output: \verbinclude TutorialLinAlgComputeTwice.out
+ */
+template <typename MatrixType_, int UpLo_>
+template <typename InputType>
+LLT<MatrixType_, UpLo_>& LLT<MatrixType_, UpLo_>::compute(const EigenBase<InputType>& a) {
+  eigen_assert(a.rows() == a.cols());
+  const Index size = a.rows();
+  m_matrix.resize(size, size);
+  if (!internal::is_same_dense(m_matrix, a.derived())) m_matrix = a.derived();
+
+  // Compute matrix L1 norm = max abs column sum.
+  m_l1_norm = RealScalar(0);
+  // TODO move this code to SelfAdjointView
+  for (Index col = 0; col < size; ++col) {
+    RealScalar abs_col_sum;
+    if (UpLo_ == Lower)
+      abs_col_sum =
+          m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
+    else
+      abs_col_sum =
+          m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
+    if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum;
+  }
+
+  m_isInitialized = true;
+  bool ok = Traits::inplace_decomposition(m_matrix);
+  m_info = ok ? Success : NumericalIssue;
+
+  return *this;
+}
+
+/** Performs a rank one update (or downdate) of the current decomposition.
+ * If A = LL^* before the rank one update,
+ * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector
+ * of the same dimension.
+ */
+template <typename MatrixType_, int UpLo_>
+template <typename VectorType>
+LLT<MatrixType_, UpLo_>& LLT<MatrixType_, UpLo_>::rankUpdate(const VectorType& v, const RealScalar& sigma) {
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);
+  eigen_assert(v.size() == m_matrix.cols());
+  eigen_assert(m_isInitialized);
+  if (internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix, v, sigma) >= 0)
+    m_info = NumericalIssue;
+  else
+    m_info = Success;
+
+  return *this;
+}
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template <typename MatrixType_, int UpLo_>
+template <typename RhsType, typename DstType>
+void LLT<MatrixType_, UpLo_>::_solve_impl(const RhsType& rhs, DstType& dst) const {
+  _solve_impl_transposed<true>(rhs, dst);
+}
+
+template <typename MatrixType_, int UpLo_>
+template <bool Conjugate, typename RhsType, typename DstType>
+void LLT<MatrixType_, UpLo_>::_solve_impl_transposed(const RhsType& rhs, DstType& dst) const {
+  dst = rhs;
+
+  matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
+  matrixU().template conjugateIf<!Conjugate>().solveInPlace(dst);
+}
+#endif
+
+/** \internal use x = llt_object.solve(x);
+ *
+ * This is the \em in-place version of solve().
+ *
+ * \param bAndX represents both the right-hand side matrix b and result x.
+ *
+ * This version avoids a copy when the right hand side matrix b is not needed anymore.
+ *
+ * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
+ * This function will const_cast it, so constness isn't honored here.
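+ *
+ * A sketch of the intended usage:
+ * \code
+ * Eigen::MatrixXd bAndX = b;  // right-hand side, overwritten with the solution
+ * llt.solveInPlace(bAndX);    // bAndX now holds x
+ * \endcode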
+ * + * \sa LLT::solve(), MatrixBase::llt() + */ +template +template +void LLT::solveInPlace(const MatrixBase& bAndX) const { + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_matrix.rows() == bAndX.rows()); + matrixL().solveInPlace(bAndX); + matrixU().solveInPlace(bAndX); +} + +/** \returns the matrix represented by the decomposition, + * i.e., it returns the product: L L^*. + * This function is provided for debug purpose. */ +template +MatrixType LLT::reconstructedMatrix() const { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return matrixL() * matrixL().adjoint().toDenseMatrix(); +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + * \sa SelfAdjointView::llt() + */ +template +inline const LLT::PlainObject> MatrixBase::llt() const { + return LLT(derived()); +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + * \sa SelfAdjointView::llt() + */ +template +inline const LLT::PlainObject, UpLo> SelfAdjointView::llt() + const { + return LLT(m_matrix); +} + +} // end namespace Eigen + +#endif // EIGEN_LLT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h new file mode 100644 index 0000000000000000000000000000000000000000..cb55b1564b9a69cbbe7948d61ff5aaa30124c6dd --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h @@ -0,0 +1,124 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to LAPACKe + * LLt decomposition based on LAPACKE_?potrf function. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_LLT_LAPACKE_H +#define EIGEN_LLT_LAPACKE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +namespace lapacke_helpers { +// ------------------------------------------------------------------------------------------------------------------- +// Dispatch for rank update handling upper and lower parts +// ------------------------------------------------------------------------------------------------------------------- + +template +struct rank_update {}; + +template <> +struct rank_update { + template + static Index run(MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma) { + return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); + } +}; + +template <> +struct rank_update { + template + static Index run(MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma) { + Transpose matt(mat); + return Eigen::internal::llt_rank_update_lower(matt, vec.conjugate(), sigma); + } +}; + +// ------------------------------------------------------------------------------------------------------------------- +// Generic lapacke llt implementation that hands of to the dispatches +// ------------------------------------------------------------------------------------------------------------------- + +template +struct lapacke_llt { + EIGEN_STATIC_ASSERT(((Mode == Lower) || (Mode == Upper)), MODE_MUST_BE_UPPER_OR_LOWER) + template + static Index blocked(MatrixType &m) { + eigen_assert(m.rows() == m.cols()); + if (m.rows() == 0) { + return -1; + } + /* Set up parameters for ?potrf */ + lapack_int size = to_lapack(m.rows()); + lapack_int matrix_order = lapack_storage_of(m); + constexpr char uplo = Mode == Upper ? 'U' : 'L'; + Scalar *a = &(m.coeffRef(0, 0)); + lapack_int lda = to_lapack(m.outerStride()); + + lapack_int info = potrf(matrix_order, uplo, size, to_lapack(a), lda); + info = (info == 0) ? -1 : info > 0 ? info - 1 : size; + return info; + } + + template + static Index rankUpdate(MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma) { + return rank_update::run(mat, vec, sigma); + } +}; +} // namespace lapacke_helpers +// end namespace lapacke_helpers + +/* + * Here, we just put the generic implementation from lapacke_llt into a full specialization of the llt_inplace + * type. By being a full specialization, the versions defined here thus get precedence over the generic implementation + * in LLT.h for double, float and complex double, complex float types. 
+ */ + +#define EIGEN_LAPACKE_LLT(EIGTYPE) \ + template <> \ + struct llt_inplace : public lapacke_helpers::lapacke_llt {}; \ + template <> \ + struct llt_inplace : public lapacke_helpers::lapacke_llt {}; + +EIGEN_LAPACKE_LLT(double) +EIGEN_LAPACKE_LLT(float) +EIGEN_LAPACKE_LLT(std::complex) +EIGEN_LAPACKE_LLT(std::complex) + +#undef EIGEN_LAPACKE_LLT + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LLT_LAPACKE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h new file mode 100644 index 0000000000000000000000000000000000000000..7e3c881aec9891435d82b082bce449a96bbb9944 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/CholmodSupport/CholmodSupport.h @@ -0,0 +1,738 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CHOLMODSUPPORT_H +#define EIGEN_CHOLMODSUPPORT_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct cholmod_configure_matrix; + +template <> +struct cholmod_configure_matrix { + template + static void run(CholmodType& mat) { + mat.xtype = CHOLMOD_REAL; + mat.dtype = CHOLMOD_DOUBLE; + } +}; + +template <> +struct cholmod_configure_matrix > { + template + static void run(CholmodType& mat) { + mat.xtype = CHOLMOD_COMPLEX; + mat.dtype = CHOLMOD_DOUBLE; + } +}; + +// Other scalar types are not yet supported by Cholmod +// template<> struct cholmod_configure_matrix { +// template +// static void run(CholmodType& mat) { +// mat.xtype = CHOLMOD_REAL; +// mat.dtype = CHOLMOD_SINGLE; +// } +// }; +// +// template<> struct cholmod_configure_matrix > { +// template +// static void run(CholmodType& mat) { +// mat.xtype = CHOLMOD_COMPLEX; +// mat.dtype = CHOLMOD_SINGLE; +// } +// }; + +} // namespace internal + +/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. + * Note that the data are shared. + */ +template +cholmod_sparse viewAsCholmod(Ref > mat) { + cholmod_sparse res; + res.nzmax = mat.nonZeros(); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + res.p = mat.outerIndexPtr(); + res.i = mat.innerIndexPtr(); + res.x = mat.valuePtr(); + res.z = 0; + res.sorted = 1; + if (mat.isCompressed()) { + res.packed = 1; + res.nz = 0; + } else { + res.packed = 0; + res.nz = mat.innerNonZeroPtr(); + } + + res.dtype = 0; + res.stype = -1; + + if (internal::is_same::value) { + res.itype = CHOLMOD_INT; + } else if (internal::is_same::value) { + res.itype = CHOLMOD_LONG; + } else { + eigen_assert(false && "Index type not supported yet"); + } + + // setup res.xtype + internal::cholmod_configure_matrix::run(res); + + res.stype = 0; + + return res; +} + +template +const cholmod_sparse viewAsCholmod(const SparseMatrix& mat) { + cholmod_sparse res = viewAsCholmod(Ref >(mat.const_cast_derived())); + return res; +} + +template +const cholmod_sparse viewAsCholmod(const SparseVector& mat) { + cholmod_sparse res = viewAsCholmod(Ref >(mat.const_cast_derived())); + return res; +} + +/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. + * The data are not copied but shared. 
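+ * In other words, the returned cholmod_sparse aliases the storage of \a mat,
+ * so \a mat must outlive any use of the returned object.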
*/ +template +cholmod_sparse viewAsCholmod(const SparseSelfAdjointView, UpLo>& mat) { + cholmod_sparse res = viewAsCholmod(Ref >(mat.matrix().const_cast_derived())); + + if (UpLo == Upper) res.stype = 1; + if (UpLo == Lower) res.stype = -1; + // swap stype for rowmajor matrices (only works for real matrices) + EIGEN_STATIC_ASSERT((Options_ & RowMajorBit) == 0 || NumTraits::IsComplex == 0, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + if (Options_ & RowMajorBit) res.stype *= -1; + + return res; +} + +/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix. + * The data are not copied but shared. */ +template +cholmod_dense viewAsCholmod(MatrixBase& mat) { + EIGEN_STATIC_ASSERT((internal::traits::Flags & RowMajorBit) == 0, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + typedef typename Derived::Scalar Scalar; + + cholmod_dense res; + res.nrow = mat.rows(); + res.ncol = mat.cols(); + res.nzmax = res.nrow * res.ncol; + res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride(); + res.x = (void*)(mat.derived().data()); + res.z = 0; + + internal::cholmod_configure_matrix::run(res); + + return res; +} + +/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. + * The data are not copied but shared. */ +template +Map > viewAsEigen(cholmod_sparse& cm) { + return Map >( + cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], static_cast(cm.p), + static_cast(cm.i), static_cast(cm.x)); +} + +/** Returns a view of the Cholmod sparse matrix factor \a cm as an Eigen sparse matrix. + * The data are not copied but shared. */ +template +Map > viewAsEigen(cholmod_factor& cm) { + return Map >( + cm.n, cm.n, static_cast(cm.p)[cm.n], static_cast(cm.p), + static_cast(cm.i), static_cast(cm.x)); +} + +namespace internal { + +// template specializations for int and long that call the correct cholmod method + +#define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \ + template \ + inline ret cm_##name(cholmod_common& Common) { \ + return cholmod_##name(&Common); \ + } \ + template <> \ + inline ret cm_##name(cholmod_common & Common) { \ + return cholmod_l_##name(&Common); \ + } + +#define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \ + template \ + inline ret cm_##name(t1& a1, cholmod_common& Common) { \ + return cholmod_##name(&a1, &Common); \ + } \ + template <> \ + inline ret cm_##name(t1 & a1, cholmod_common & Common) { \ + return cholmod_l_##name(&a1, &Common); \ + } + +EIGEN_CHOLMOD_SPECIALIZE0(int, start) +EIGEN_CHOLMOD_SPECIALIZE0(int, finish) + +EIGEN_CHOLMOD_SPECIALIZE1(int, free_factor, cholmod_factor*, L) +EIGEN_CHOLMOD_SPECIALIZE1(int, free_dense, cholmod_dense*, X) +EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A) + +EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A) +EIGEN_CHOLMOD_SPECIALIZE1(cholmod_sparse*, factor_to_sparse, cholmod_factor, L) + +template +inline cholmod_dense* cm_solve(int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common& Common) { + return cholmod_solve(sys, &L, &B, &Common); +} +template <> +inline cholmod_dense* cm_solve(int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common& Common) { + return cholmod_l_solve(sys, &L, &B, &Common); +} + +template +inline cholmod_sparse* cm_spsolve(int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common& Common) { + return cholmod_spsolve(sys, &L, &B, &Common); +} +template <> +inline cholmod_sparse* cm_spsolve(int sys, cholmod_factor& L, cholmod_sparse& B, + cholmod_common& Common) { + return cholmod_l_spsolve(sys, 
&L, &B, &Common);
+}
+
+template <typename StorageIndex_>
+inline int cm_factorize_p(cholmod_sparse* A, double beta[2], StorageIndex_* fset, std::size_t fsize, cholmod_factor* L,
+                          cholmod_common& Common) {
+  return cholmod_factorize_p(A, beta, fset, fsize, L, &Common);
+}
+template <>
+inline int cm_factorize_p<SuiteSparse_long>(cholmod_sparse* A, double beta[2], SuiteSparse_long* fset,
+                                            std::size_t fsize, cholmod_factor* L, cholmod_common& Common) {
+  return cholmod_l_factorize_p(A, beta, fset, fsize, L, &Common);
+}
+
+#undef EIGEN_CHOLMOD_SPECIALIZE0
+#undef EIGEN_CHOLMOD_SPECIALIZE1
+
+}  // namespace internal
+
+enum CholmodMode { CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt };
+
+/** \ingroup CholmodSupport_Module
+ * \class CholmodBase
+ * \brief The base class for the direct Cholesky factorization of Cholmod
+ * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
+ */
+template <typename MatrixType_, int UpLo_, typename Derived>
+class CholmodBase : public SparseSolverBase<Derived> {
+ protected:
+  typedef SparseSolverBase<Derived> Base;
+  using Base::derived;
+  using Base::m_isInitialized;
+
+ public:
+  typedef MatrixType_ MatrixType;
+  enum { UpLo = UpLo_ };
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef MatrixType CholMatrixType;
+  typedef typename MatrixType::StorageIndex StorageIndex;
+  enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime };
+
+ public:
+  CholmodBase() : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) {
+    EIGEN_STATIC_ASSERT((internal::is_same<double, RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);
+    m_shiftOffset[0] = m_shiftOffset[1] = 0.0;
+    internal::cm_start<StorageIndex>(m_cholmod);
+  }
+
+  explicit CholmodBase(const MatrixType& matrix)
+      : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) {
+    EIGEN_STATIC_ASSERT((internal::is_same<double, RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);
+    m_shiftOffset[0] = m_shiftOffset[1] = 0.0;
+    internal::cm_start<StorageIndex>(m_cholmod);
+    compute(matrix);
+  }
+
+  ~CholmodBase() {
+    if (m_cholmodFactor) internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);
+    internal::cm_finish<StorageIndex>(m_cholmod);
+  }
+
+  inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
+  inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
+
+  /** \brief Reports whether previous computation was successful.
+   *
+   * \returns \c Success if computation was successful,
+   *          \c NumericalIssue if the matrix appears to be negative.
+   */
+  ComputationInfo info() const {
+    eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+    return m_info;
+  }
+
+  /** Computes the sparse Cholesky decomposition of \a matrix */
+  Derived& compute(const MatrixType& matrix) {
+    analyzePattern(matrix);
+    factorize(matrix);
+    return derived();
+  }
+
+  /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+   *
+   * This function is particularly useful when solving for several problems having the same structure.
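+   * A typical pattern (sketch; \c A1 and \c A2 share the same sparsity):
+   * \code
+   * solver.analyzePattern(A1);
+   * solver.factorize(A1);
+   * x1 = solver.solve(b1);
+   * solver.factorize(A2);  // reuses the symbolic analysis
+   * x2 = solver.solve(b2);
+   * \endcode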
+ * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) { + if (m_cholmodFactor) { + internal::cm_free_factor(m_cholmodFactor, m_cholmod); + m_cholmodFactor = 0; + } + cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); + m_cholmodFactor = internal::cm_analyze(A, m_cholmod); + + this->m_isInitialized = true; + this->m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been + * performed. + * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); + internal::cm_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, m_cholmod); + + // If the factorization failed, either the input matrix was zero (so m_cholmodFactor == nullptr), or minor is the + // column at which it failed. On success minor == n. + this->m_info = + (m_cholmodFactor != nullptr && m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); + m_factorizationIsOk = true; + } + + /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations. + * See the Cholmod user guide for details. */ + cholmod_common& cholmod() { return m_cholmod; } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void _solve_impl(const MatrixBase& b, MatrixBase& dest) const { + eigen_assert(m_factorizationIsOk && + "The decomposition is not in a valid state for solving, you must first call either compute() or " + "symbolic()/numeric()"); + const Index size = m_cholmodFactor->n; + EIGEN_UNUSED_VARIABLE(size); + eigen_assert(size == b.rows()); + + // Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref. + Ref > b_ref(b.derived()); + + cholmod_dense b_cd = viewAsCholmod(b_ref); + cholmod_dense* x_cd = internal::cm_solve(CHOLMOD_A, *m_cholmodFactor, b_cd, m_cholmod); + if (!x_cd) { + this->m_info = NumericalIssue; + return; + } + // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) + // NOTE Actually, the copy can be avoided by calling cholmod_solve2 instead of cholmod_solve + dest = Matrix::Map(reinterpret_cast(x_cd->x), + b.rows(), b.cols()); + internal::cm_free_dense(x_cd, m_cholmod); + } + + /** \internal */ + template + void _solve_impl(const SparseMatrixBase& b, SparseMatrixBase& dest) const { + eigen_assert(m_factorizationIsOk && + "The decomposition is not in a valid state for solving, you must first call either compute() or " + "symbolic()/numeric()"); + const Index size = m_cholmodFactor->n; + EIGEN_UNUSED_VARIABLE(size); + eigen_assert(size == b.rows()); + + // note: cs stands for Cholmod Sparse + Ref > b_ref( + b.const_cast_derived()); + cholmod_sparse b_cs = viewAsCholmod(b_ref); + cholmod_sparse* x_cs = internal::cm_spsolve(CHOLMOD_A, *m_cholmodFactor, b_cs, m_cholmod); + if (!x_cs) { + this->m_info = NumericalIssue; + return; + } + // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) 
+ // NOTE cholmod_spsolve in fact just calls the dense solver for blocks of 4 columns at a time (similar to Eigen's + // sparse solver) + dest.derived() = viewAsEigen(*x_cs); + internal::cm_free_sparse(x_cs, m_cholmod); + } +#endif // EIGEN_PARSED_BY_DOXYGEN + + /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization. + * + * During the numerical factorization, an offset term is added to the diagonal coefficients:\n + * \c d_ii = \a offset + \c d_ii + * + * The default is \a offset=0. + * + * \returns a reference to \c *this. + */ + Derived& setShift(const RealScalar& offset) { + m_shiftOffset[0] = double(offset); + return derived(); + } + + /** \returns the determinant of the underlying matrix from the current factorization */ + Scalar determinant() const { + using std::exp; + return exp(logDeterminant()); + } + + /** \returns the log determinant of the underlying matrix from the current factorization */ + Scalar logDeterminant() const { + using numext::real; + using std::log; + eigen_assert(m_factorizationIsOk && + "The decomposition is not in a valid state for solving, you must first call either compute() or " + "symbolic()/numeric()"); + + RealScalar logDet = 0; + Scalar* x = static_cast(m_cholmodFactor->x); + if (m_cholmodFactor->is_super) { + // Supernodal factorization stored as a packed list of dense column-major blocks, + // as described by the following structure: + + // super[k] == index of the first column of the j-th super node + StorageIndex* super = static_cast(m_cholmodFactor->super); + // pi[k] == offset to the description of row indices + StorageIndex* pi = static_cast(m_cholmodFactor->pi); + // px[k] == offset to the respective dense block + StorageIndex* px = static_cast(m_cholmodFactor->px); + + Index nb_super_nodes = m_cholmodFactor->nsuper; + for (Index k = 0; k < nb_super_nodes; ++k) { + StorageIndex ncols = super[k + 1] - super[k]; + StorageIndex nrows = pi[k + 1] - pi[k]; + + Map, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows + 1)); + logDet += sk.real().log().sum(); + } + } else { + // Simplicial factorization stored as standard CSC matrix. + StorageIndex* p = static_cast(m_cholmodFactor->p); + Index size = m_cholmodFactor->n; + for (Index k = 0; k < size; ++k) logDet += log(real(x[p[k]])); + } + if (m_cholmodFactor->is_ll) logDet *= 2.0; + return logDet; + } + + template + void dumpMemory(Stream& /*s*/) {} + + protected: + mutable cholmod_common m_cholmod; + cholmod_factor* m_cholmodFactor; + double m_shiftOffset[2]; + mutable ComputationInfo m_info; + int m_factorizationIsOk; + int m_analysisIsOk; +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLLT + * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical + * interest. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices X and B can be + * either dense or sparse. + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. 
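+ *
+ * A minimal solve sketch (illustrative only; \c A and \c b are placeholders for a user-filled system):
+ * \code
+ * SparseMatrix<double> A;  // must be selfadjoint and positive definite
+ * VectorXd b;
+ * // ... fill A and b ...
+ * CholmodSimplicialLLT<SparseMatrix<double> > llt(A);
+ * VectorXd x = llt.solve(b);  // check llt.info() == Success before using x
+ * \endcode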
+ * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non + * compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. + * + * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT + */ +template +class CholmodSimplicialLLT : public CholmodBase > { + typedef CholmodBase Base; + using Base::m_cholmod; + + public: + typedef MatrixType_ MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef TriangularView MatrixL; + typedef TriangularView MatrixU; + + CholmodSimplicialLLT() : Base() { init(); } + + CholmodSimplicialLLT(const MatrixType& matrix) : Base() { + init(); + this->compute(matrix); + } + + ~CholmodSimplicialLLT() {} + + /** \returns an expression of the factor L */ + inline MatrixL matrixL() const { return viewAsEigen(*Base::m_cholmodFactor); } + + /** \returns an expression of the factor U (= L^*) */ + inline MatrixU matrixU() const { return matrixL().adjoint(); } + + protected: + void init() { + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLDLT + * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical + * interest. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices X and B can be + * either dense or sparse. + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non + * compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. 
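+ *
+ * A hedged sketch (here \c A is a user-filled selfadjoint positive-definite SparseMatrix<double> and
+ * \c b a matching right-hand side):
+ * \code
+ * CholmodSimplicialLDLT<SparseMatrix<double> > ldlt(A);
+ * VectorXd d = ldlt.vectorD();  // diagonal of the factor D
+ * VectorXd x = ldlt.solve(b);   // check ldlt.info() == Success
+ * \endcode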
+ * + * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT + */ +template +class CholmodSimplicialLDLT : public CholmodBase > { + typedef CholmodBase Base; + using Base::m_cholmod; + + public: + typedef MatrixType_ MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix VectorType; + typedef TriangularView MatrixL; + typedef TriangularView MatrixU; + + CholmodSimplicialLDLT() : Base() { init(); } + + CholmodSimplicialLDLT(const MatrixType& matrix) : Base() { + init(); + this->compute(matrix); + } + + ~CholmodSimplicialLDLT() {} + + /** \returns a vector expression of the diagonal D */ + inline VectorType vectorD() const { + auto cholmodL = viewAsEigen(*Base::m_cholmodFactor); + + VectorType D{cholmodL.rows()}; + + for (Index k = 0; k < cholmodL.outerSize(); ++k) { + typename decltype(cholmodL)::InnerIterator it{cholmodL, k}; + D(k) = it.value(); + } + + return D; + } + + /** \returns an expression of the factor L */ + inline MatrixL matrixL() const { return viewAsEigen(*Base::m_cholmodFactor); } + + /** \returns an expression of the factor U (= L^*) */ + inline MatrixU matrixU() const { return matrixL().adjoint(); } + + protected: + void init() { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSupernodalLLT + * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization + * using the Cholmod library. + * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non + * compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. 
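+ *
+ * An illustrative sketch (assumes \c A is a filled, selfadjoint positive-definite SparseMatrix<double>
+ * and \c b a matching VectorXd):
+ * \code
+ * CholmodSupernodalLLT<SparseMatrix<double> > chol;
+ * chol.compute(A);
+ * VectorXd x = chol.solve(b);  // check chol.info() == Success
+ * \endcode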
+ * + * \sa \ref TutorialSparseSolverConcept + */ +template +class CholmodSupernodalLLT : public CholmodBase > { + typedef CholmodBase Base; + using Base::m_cholmod; + + public: + typedef MatrixType_ MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + + CholmodSupernodalLLT() : Base() { init(); } + + CholmodSupernodalLLT(const MatrixType& matrix) : Base() { + init(); + this->compute(matrix); + } + + ~CholmodSupernodalLLT() {} + + /** \returns an expression of the factor L */ + inline MatrixType matrixL() const { + // Convert Cholmod factor's supernodal storage format to Eigen's CSC storage format + cholmod_sparse* cholmodL = internal::cm_factor_to_sparse(*Base::m_cholmodFactor, m_cholmod); + MatrixType L = viewAsEigen(*cholmodL); + internal::cm_free_sparse(cholmodL, m_cholmod); + + return L; + } + + /** \returns an expression of the factor U (= L^*) */ + inline MatrixType matrixU() const { return matrixL().adjoint(); } + + protected: + void init() { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodDecomposition + * \brief A general Cholesky factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization + * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * This variant permits to change the underlying Cholesky method at runtime. + * On the other hand, it does not provide access to the result of the factorization. + * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. + * + * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \implsparsesolverconcept + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non + * compressed. + * + * \warning Only double precision real and complex scalar types are supported by Cholmod. 
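+ *
+ * A runtime-mode sketch (illustrative only; \c A and \c b are placeholders):
+ * \code
+ * CholmodDecomposition<SparseMatrix<double> > solver;
+ * solver.setMode(CholmodSupernodalLLt);  // or CholmodAuto, CholmodSimplicialLLt, CholmodLDLt
+ * solver.compute(A);
+ * VectorXd x = solver.solve(b);
+ * \endcode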
+ * + * \sa \ref TutorialSparseSolverConcept + */ +template +class CholmodDecomposition : public CholmodBase > { + typedef CholmodBase Base; + using Base::m_cholmod; + + public: + typedef MatrixType_ MatrixType; + + CholmodDecomposition() : Base() { init(); } + + CholmodDecomposition(const MatrixType& matrix) : Base() { + init(); + this->compute(matrix); + } + + ~CholmodDecomposition() {} + + void setMode(CholmodMode mode) { + switch (mode) { + case CholmodAuto: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + break; + case CholmodSimplicialLLt: + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + break; + case CholmodSupernodalLLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + break; + case CholmodLDLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + break; + default: + break; + } + } + + protected: + void init() { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CHOLMODSUPPORT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/CholmodSupport/InternalHeaderCheck.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/CholmodSupport/InternalHeaderCheck.h new file mode 100644 index 0000000000000000000000000000000000000000..0fb3abc704095b8874a447c99451f7f12f1fd66b --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/CholmodSupport/InternalHeaderCheck.h @@ -0,0 +1,3 @@ +#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H +#error "Please include Eigen/CholmodSupport instead of including headers inside the src directory directly." +#endif diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArithmeticSequence.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArithmeticSequence.h new file mode 100644 index 0000000000000000000000000000000000000000..ae6373dda2d2b75c3b8be9ef5e0fff5edeb21ccd --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArithmeticSequence.h @@ -0,0 +1,239 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARITHMETIC_SEQUENCE_H +#define EIGEN_ARITHMETIC_SEQUENCE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +// Helper to cleanup the type of the increment: +template +struct cleanup_seq_incr { + typedef typename cleanup_index_type::type type; +}; + +} // namespace internal + +//-------------------------------------------------------------------------------- +// seq(first,last,incr) and seqN(first,size,incr) +//-------------------------------------------------------------------------------- + +template > +class ArithmeticSequence; + +template +ArithmeticSequence::type, + typename internal::cleanup_index_type::type, + typename internal::cleanup_seq_incr::type> +seqN(FirstType first, SizeType size, IncrType incr); + +/** \class ArithmeticSequence + * \ingroup Core_Module + * + * This class represents an arithmetic progression \f$ a_0, a_1, a_2, ..., a_{n-1}\f$ defined by + * its \em first value \f$ a_0 \f$, its \em size (aka length) \em n, and the \em increment (aka stride) + * that is equal to \f$ a_{i+1}-a_{i}\f$ for any \em i. 
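+ *
+ * For example (illustrative):
+ * \code
+ * auto s = seqN(2, 5, 3);  // the sequence 2, 5, 8, 11, 14
+ * Index n = s.size();      // 5
+ * Index v = s[2];          // 2 + 2*3 == 8
+ * \endcode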
+ * + * It is internally used as the return type of the Eigen::seq and Eigen::seqN functions, and as the input arguments + * of DenseBase::operator()(const RowIndices&, const ColIndices&), and most of the time this is the + * only way it is used. + * + * \tparam FirstType type of the first element, usually an Index, + * but internally it can be a symbolic expression + * \tparam SizeType type representing the size of the sequence, usually an Index + * or a compile time integral constant. Internally, it can also be a symbolic expression + * \tparam IncrType type of the increment, can be a runtime Index, or a compile time integral constant (default is + * compile-time 1) + * + * \sa Eigen::seq, Eigen::seqN, DenseBase::operator()(const RowIndices&, const ColIndices&), class IndexedView + */ +template +class ArithmeticSequence { + public: + constexpr ArithmeticSequence() = default; + constexpr ArithmeticSequence(FirstType first, SizeType size) : m_first(first), m_size(size) {} + constexpr ArithmeticSequence(FirstType first, SizeType size, IncrType incr) + : m_first(first), m_size(size), m_incr(incr) {} + + enum { + // SizeAtCompileTime = internal::get_fixed_value::value, + IncrAtCompileTime = internal::get_fixed_value::value + }; + + /** \returns the size, i.e., number of elements, of the sequence */ + constexpr Index size() const { return m_size; } + + /** \returns the first element \f$ a_0 \f$ in the sequence */ + constexpr Index first() const { return m_first; } + + /** \returns the value \f$ a_i \f$ at index \a i in the sequence. */ + constexpr Index operator[](Index i) const { return m_first + i * m_incr; } + + constexpr const FirstType& firstObject() const { return m_first; } + constexpr const SizeType& sizeObject() const { return m_size; } + constexpr const IncrType& incrObject() const { return m_incr; } + + protected: + FirstType m_first; + SizeType m_size; + IncrType m_incr; + + public: + constexpr auto reverse() const -> decltype(Eigen::seqN(m_first + (m_size + fix<-1>()) * m_incr, m_size, -m_incr)) { + return seqN(m_first + (m_size + fix<-1>()) * m_incr, m_size, -m_incr); + } +}; + +/** \returns an ArithmeticSequence starting at \a first, of length \a size, and increment \a incr + * + * \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ +template +ArithmeticSequence::type, + typename internal::cleanup_index_type::type, + typename internal::cleanup_seq_incr::type> +seqN(FirstType first, SizeType size, IncrType incr) { + return ArithmeticSequence::type, + typename internal::cleanup_index_type::type, + typename internal::cleanup_seq_incr::type>(first, size, incr); +} + +/** \returns an ArithmeticSequence starting at \a first, of length \a size, and unit increment + * + * \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) */ +template +ArithmeticSequence::type, + typename internal::cleanup_index_type::type> +seqN(FirstType first, SizeType size) { + return ArithmeticSequence::type, + typename internal::cleanup_index_type::type>(first, size); +} + +#ifdef EIGEN_PARSED_BY_DOXYGEN + +/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and with positive (or negative) increment \a + * incr + * + * It is essentially an alias to: + * \code + * seqN(f, (l-f+incr)/incr, incr); + * \endcode + * + * \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) + */ +template +auto seq(FirstType f, LastType l, IncrType incr); + +/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and unit increment + * + * It is essentially an 
alias to: + * \code + * seqN(f,l-f+1); + * \endcode + * + * \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) + */ +template +auto seq(FirstType f, LastType l); + +#else // EIGEN_PARSED_BY_DOXYGEN + +template +auto seq(FirstType f, LastType l) + -> decltype(seqN(typename internal::cleanup_index_type::type(f), + (typename internal::cleanup_index_type::type(l) - + typename internal::cleanup_index_type::type(f) + fix<1>()))) { + return seqN(typename internal::cleanup_index_type::type(f), + (typename internal::cleanup_index_type::type(l) - + typename internal::cleanup_index_type::type(f) + fix<1>())); +} + +template +auto seq(FirstType f, LastType l, IncrType incr) + -> decltype(seqN(typename internal::cleanup_index_type::type(f), + (typename internal::cleanup_index_type::type(l) - + typename internal::cleanup_index_type::type(f) + + typename internal::cleanup_seq_incr::type(incr)) / + typename internal::cleanup_seq_incr::type(incr), + typename internal::cleanup_seq_incr::type(incr))) { + typedef typename internal::cleanup_seq_incr::type CleanedIncrType; + return seqN(typename internal::cleanup_index_type::type(f), + (typename internal::cleanup_index_type::type(l) - + typename internal::cleanup_index_type::type(f) + CleanedIncrType(incr)) / + CleanedIncrType(incr), + CleanedIncrType(incr)); +} + +#endif // EIGEN_PARSED_BY_DOXYGEN + +namespace placeholders { + +/** \cpp11 + * \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr. + * + * It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode + * + * \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ +template +auto lastN(SizeType size, IncrType incr) + -> decltype(seqN(Eigen::placeholders::last - (size - fix<1>()) * incr, size, incr)) { + return seqN(Eigen::placeholders::last - (size - fix<1>()) * incr, size, incr); +} + +/** \cpp11 + * \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment. + * + * It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode + * + * \sa lastN(SizeType,IncrType, seqN(FirstType,SizeType), seq(FirstType,LastType) */ +template +auto lastN(SizeType size) -> decltype(seqN(Eigen::placeholders::last + fix<1>() - size, size)) { + return seqN(Eigen::placeholders::last + fix<1>() - size, size); +} + +} // namespace placeholders + +/** \namespace Eigen::indexing + * \ingroup Core_Module + * + * The sole purpose of this namespace is to be able to import all functions + * and symbols that are expected to be used within operator() for indexing + * and slicing. If you already imported the whole Eigen namespace: + * \code using namespace Eigen; \endcode + * then you are already all set. 
Otherwise, if you don't want/cannot import + * the whole Eigen namespace, the following line: + * \code using namespace Eigen::indexing; \endcode + * is equivalent to: + * \code + using Eigen::fix; + using Eigen::seq; + using Eigen::seqN; + using Eigen::placeholders::all; + using Eigen::placeholders::last; + using Eigen::placeholders::lastN; // c++11 only + using Eigen::placeholders::lastp1; + \endcode + */ +namespace indexing { +using Eigen::fix; +using Eigen::seq; +using Eigen::seqN; +using Eigen::placeholders::all; +using Eigen::placeholders::last; +using Eigen::placeholders::lastN; +using Eigen::placeholders::lastp1; +} // namespace indexing + +} // end namespace Eigen + +#endif // EIGEN_ARITHMETIC_SEQUENCE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Array.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Array.h new file mode 100644 index 0000000000000000000000000000000000000000..fb1f48a2b0dec4777db613bb25b2a778f57f3983 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Array.h @@ -0,0 +1,369 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAY_H +#define EIGEN_ARRAY_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits> + : traits> { + typedef ArrayXpr XprKind; + typedef ArrayBase> XprBase; +}; +} // namespace internal + +/** \class Array + * \ingroup Core_Module + * + * \brief General-purpose arrays with easy API for coefficient-wise operations + * + * The %Array class is very similar to the Matrix class. It provides + * general-purpose one- and two-dimensional arrays. The difference between the + * %Array and the %Matrix class is primarily in the API: the API for the + * %Array class provides easy access to coefficient-wise operations, while the + * API for the %Matrix class provides easy access to linear-algebra + * operations. + * + * See documentation of class Matrix for detailed information on the template parameters + * storage layout. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. + * + * \sa \blank \ref TutorialArrayClass, \ref TopicClassHierarchy + */ +template +class Array : public PlainObjectBase> { + public: + typedef PlainObjectBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Array) + + enum { Options = Options_ }; + typedef typename Base::PlainObject PlainObject; + + protected: + template + friend struct internal::conservative_resize_like_impl; + + using Base::m_storage; + + public: + using Base::base; + using Base::coeff; + using Base::coeffRef; + + /** + * The usage of + * using Base::operator=; + * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped + * the usage of 'using'. This should be done only for operator=. + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const EigenBase& other) { + return Base::operator=(other); + } + + /** Set all the entries to \a value. + * \sa DenseBase::setConstant(), DenseBase::fill() + */ + /* This overload is needed because the usage of + * using Base::operator=; + * fails on MSVC. 
Since the code below is working with GCC and MSVC, we skipped + * the usage of 'using'. This should be done only for operator=. + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Scalar& value) { + Base::setConstant(value); + return *this; + } + + /** Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const DenseBase& other) { + return Base::_set(other); + } + + /** + * \brief Assigns arrays to each other. + * + * \note This is a special case of the templated operator=. Its purpose is + * to prevent a default operator= from hiding the templated operator=. + * + * \callgraph + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Array& other) { return Base::_set(other); } + + /** Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(Index,Index) + */ +#ifdef EIGEN_INITIALIZE_COEFFS + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array() : Base() { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array() = default; +#endif + /** \brief Move constructor */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array(Array&&) = default; + EIGEN_DEVICE_FUNC Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable::value) { + Base::operator=(std::move(other)); + return *this; + } + + /** \copydoc PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const + * ArgTypes&... args) + * + * Example: \include Array_variadic_ctor_cxx11.cpp + * Output: \verbinclude Array_variadic_ctor_cxx11.out + * + * \sa Array(const std::initializer_list>&) + * \sa Array(const Scalar&), Array(const Scalar&,const Scalar&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, + const ArgTypes&... args) + : Base(a0, a1, a2, a3, args...) {} + + /** \brief Constructs an array and initializes it from the coefficients given as initializer-lists grouped by row. + * \cpp11 + * + * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients: + * + * Example: \include Array_initializer_list_23_cxx11.cpp + * Output: \verbinclude Array_initializer_list_23_cxx11.out + * + * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is + * triggered. + * + * In the case of a compile-time column 1D array, implicit transposition from a single row is allowed. 
+ * Therefore Array{{1,2,3,4,5}} is legal and the more verbose syntax + * Array{{1},{2},{3},{4},{5}} can be avoided: + * + * Example: \include Array_initializer_list_vector_cxx11.cpp + * Output: \verbinclude Array_initializer_list_vector_cxx11.out + * + * In the case of fixed-sized arrays, the initializer list sizes must exactly match the array sizes, + * and implicit transposition is allowed for compile-time 1D arrays only. + * + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array( + const std::initializer_list>& list) + : Base(list) {} + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(const T& x) { + Base::template _init1(x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) { + this->template _init2(val0, val1); + } + +#else + /** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */ + EIGEN_DEVICE_FUNC explicit Array(const Scalar* data); + /** Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass the dimension here, so it makes more sense to use the default + * constructor Array() instead. + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(Index dim); + /** constructs an initialized 1x1 Array with the given coefficient + * \sa const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args */ + Array(const Scalar& value); + /** constructs an uninitialized array with \a rows rows and \a cols columns. + * + * This is useful for dynamic-size arrays. For fixed-size arrays, + * it is redundant to pass these parameters, so one should use the default constructor + * Array() instead. */ + Array(Index rows, Index cols); + /** constructs an initialized 2D vector with given coefficients + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */ + Array(const Scalar& val0, const Scalar& val1); +#endif // end EIGEN_PARSED_BY_DOXYGEN + + /** constructs an initialized 3D vector with given coefficients + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; + m_storage.data()[2] = val2; + } + /** constructs an initialized 4D vector with given coefficients + * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... 
args) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, + const Scalar& val3) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) + m_storage.data()[0] = val0; + m_storage.data()[1] = val1; + m_storage.data()[2] = val2; + m_storage.data()[3] = val3; + } + + /** Copy constructor */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array(const Array&) = default; + + private: + struct PrivateType {}; + + public: + /** \sa MatrixBase::operator=(const EigenBase&) */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array( + const EigenBase& other, + std::enable_if_t::value, PrivateType> = + PrivateType()) + : Base(other.derived()) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return 1; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); } + +#ifdef EIGEN_ARRAY_PLUGIN +#include EIGEN_ARRAY_PLUGIN +#endif + + private: + template + friend struct internal::matrix_swap_impl; +}; + +/** \defgroup arraytypedefs Global array typedefs + * \ingroup Core_Module + * + * %Eigen defines several typedef shortcuts for most common 1D and 2D array types. + * + * The general patterns are the following: + * + * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for + * dynamic size, and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c + * cd for complex double. + * + * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of + * floats. + * + * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is + * a fixed-size 1D array of 4 complex floats. + * + * With \cpp11, template alias are also defined for common sizes. + * They follow the same pattern as above except that the scalar type suffix is replaced by a + * template parameter, i.e.: + * - `ArrayRowsCols` where `Rows` and `Cols` can be \c 2,\c 3,\c 4, or \c X for fixed or dynamic size. + * - `ArraySize` where `Size` can be \c 2,\c 3,\c 4 or \c X for fixed or dynamic size 1D arrays. 
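+ *
+ * A few illustrative instances of the patterns above:
+ * \code
+ * Array33d a33;        // fixed-size 3x3 array of double
+ * ArrayXXf axx(4, 7);  // dynamic-size 2D array of float
+ * Array4cf a4;         // fixed-size 1D array of 4 complex<float>
+ * Array3<int> a3;      // C++11 alias, equivalent to Array<int, 3, 1>
+ * \endcode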
+ * + * \sa class Array + */ + +#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ + /** \ingroup arraytypedefs */ \ + typedef Array Array##SizeSuffix##SizeSuffix##TypeSuffix; \ + /** \ingroup arraytypedefs */ \ + typedef Array Array##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ + /** \ingroup arraytypedefs */ \ + typedef Array Array##Size##X##TypeSuffix; \ + /** \ingroup arraytypedefs */ \ + typedef Array Array##X##Size##TypeSuffix; + +#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ + EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ + EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ + EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ + EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ + EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ + EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ + EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) + +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_ARRAY_TYPEDEFS +#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS + +#define EIGEN_MAKE_ARRAY_TYPEDEFS(Size, SizeSuffix) \ + /** \ingroup arraytypedefs */ \ + /** \brief \cpp11 */ \ + template \ + using Array##SizeSuffix##SizeSuffix = Array; \ + /** \ingroup arraytypedefs */ \ + /** \brief \cpp11 */ \ + template \ + using Array##SizeSuffix = Array; + +#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Size) \ + /** \ingroup arraytypedefs */ \ + /** \brief \cpp11 */ \ + template \ + using Array##Size##X = Array; \ + /** \ingroup arraytypedefs */ \ + /** \brief \cpp11 */ \ + template \ + using Array##X##Size = Array; + +EIGEN_MAKE_ARRAY_TYPEDEFS(2, 2) +EIGEN_MAKE_ARRAY_TYPEDEFS(3, 3) +EIGEN_MAKE_ARRAY_TYPEDEFS(4, 4) +EIGEN_MAKE_ARRAY_TYPEDEFS(Dynamic, X) +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(2) +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(3) +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(4) + +#undef EIGEN_MAKE_ARRAY_TYPEDEFS +#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS + +#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ + using Eigen::Matrix##SizeSuffix##TypeSuffix; \ + using Eigen::Vector##SizeSuffix##TypeSuffix; \ + using Eigen::RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) + +#define EIGEN_USING_ARRAY_TYPEDEFS \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ + EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) + +} // end namespace Eigen + +#endif // EIGEN_ARRAY_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArrayBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArrayBase.h new file mode 100644 index 0000000000000000000000000000000000000000..6237df454be146a550d134e462d43bca53c30298 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArrayBase.h @@ -0,0 +1,222 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAYBASE_H +#define EIGEN_ARRAYBASE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +template +class MatrixWrapper; + +/** \class ArrayBase + * \ingroup Core_Module + * + * \brief Base class for all 1D and 2D array, and related expressions + * + * An array is similar to a dense vector or matrix. While matrices are mathematical + * objects with well defined linear algebra operators, an array is just a collection + * of scalar values arranged in a one or two dimensional fashion. As the main consequence, + * all operations applied to an array are performed coefficient wise. Furthermore, + * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient + * constructors allowing to easily write generic code working for both scalar values + * and arrays. + * + * This class is the base that is inherited by all array expression types. + * + * \tparam Derived is the derived type, e.g., an array or an expression type. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. + * + * \sa class MatrixBase, \ref TopicClassHierarchy + */ +template +class ArrayBase : public DenseBase { + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** The base class for a given storage type. */ + typedef ArrayBase StorageBaseType; + + typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseBase Base; + using Base::ColsAtCompileTime; + using Base::Flags; + using Base::IsVectorAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::RowsAtCompileTime; + using Base::SizeAtCompileTime; + + using Base::coeff; + using Base::coeffRef; + using Base::cols; + using Base::const_cast_derived; + using Base::derived; + using Base::lazyAssign; + using Base::rows; + using Base::size; + using Base::operator-; + using Base::operator=; + using Base::operator+=; + using Base::operator-=; + using Base::operator*=; + using Base::operator/=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Base::PlainObject PlainObject; + + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp, PlainObject> ConstantReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase +#define EIGEN_DOC_UNARY_ADDONS(X, Y) +#include "../plugins/MatrixCwiseUnaryOps.inc" +#include "../plugins/ArrayCwiseUnaryOps.inc" +#include "../plugins/CommonCwiseBinaryOps.inc" +#include "../plugins/MatrixCwiseBinaryOps.inc" +#include "../plugins/ArrayCwiseBinaryOps.inc" +#ifdef EIGEN_ARRAYBASE_PLUGIN +#include EIGEN_ARRAYBASE_PLUGIN +#endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_UNARY_ADDONS + + /** Special case of the template operator=, in order to prevent 
the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ArrayBase& other) { + internal::call_assignment(derived(), other.derived()); + return derived(); + } + + /** Set all the entries to \a value. + * \sa DenseBase::setConstant(), DenseBase::fill() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Scalar& value) { + Base::setConstant(value); + return derived(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const Scalar& scalar); + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const Scalar& scalar); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const ArrayBase& other); + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const ArrayBase& other); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const ArrayBase& other); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const ArrayBase& other); + + public: + EIGEN_DEVICE_FUNC ArrayBase& array() { return *this; } + EIGEN_DEVICE_FUNC const ArrayBase& array() const { return *this; } + + /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array + * \sa MatrixBase::array() */ + EIGEN_DEVICE_FUNC MatrixWrapper matrix() { return MatrixWrapper(derived()); } + EIGEN_DEVICE_FUNC const MatrixWrapper matrix() const { + return MatrixWrapper(derived()); + } + + // template + // inline void evalTo(Dest& dst) const { dst = matrix(); } + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase) + + private: + explicit ArrayBase(Index); + ArrayBase(Index, Index); + template + explicit ArrayBase(const ArrayBase&); + + protected: + // mixing arrays and matrices is not legal + template + Derived& operator+=(const MatrixBase&) { + EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1, + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); + return *this; + } + // mixing arrays and matrices is not legal + template + Derived& operator-=(const MatrixBase&) { + EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1, + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); + return *this; + } +}; + +/** replaces \c *this by \c *this - \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase::operator-=(const ArrayBase& other) { + call_assignment(derived(), other.derived(), internal::sub_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this + \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase::operator+=(const ArrayBase& other) { + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other coefficient wise. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase::operator*=(const ArrayBase& other) { + call_assignment(derived(), other.derived(), internal::mul_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this / \a other coefficient wise. 
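+ *
+ * For instance (illustrative):
+ * \code
+ * ArrayXd a = ArrayXd::LinSpaced(4, 1.0, 4.0);  // 1 2 3 4
+ * ArrayXd b = ArrayXd::Constant(4, 2.0);
+ * a /= b;                                       // a is now 0.5 1 1.5 2
+ * \endcode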
+ * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase::operator/=(const ArrayBase& other) { + call_assignment(derived(), other.derived(), internal::div_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_ARRAYBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArrayWrapper.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArrayWrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..b636d88ad18d22444af110ec8e50951c2734cc39 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ArrayWrapper.h @@ -0,0 +1,173 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAYWRAPPER_H +#define EIGEN_ARRAYWRAPPER_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class ArrayWrapper + * \ingroup Core_Module + * + * \brief Expression of a mathematical vector or matrix as an array object + * + * This class is the return type of MatrixBase::array(), and most of the time + * this is the only way it is use. + * + * \sa MatrixBase::array(), class MatrixWrapper + */ + +namespace internal { +template +struct traits > : public traits > { + typedef ArrayXpr XprKind; + // Let's remove NestByRefBit + enum { + Flags0 = traits >::Flags, + LvalueBitFlag = is_lvalue::value ? LvalueBit : 0, + Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag + }; +}; +} // namespace internal + +template +class ArrayWrapper : public ArrayBase > { + public: + typedef ArrayBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) + typedef internal::remove_all_t NestedExpression; + + typedef std::conditional_t::value, Scalar, const Scalar> + ScalarWithConstIfNotLvalue; + + typedef typename internal::ref_selector::non_const_type NestedExpressionType; + + using Base::coeffRef; + + EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { + return m_expression.outerStride(); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { + return m_expression.innerStride(); + } + + EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_expression.data(); } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { + return m_expression.coeffRef(rowId, colId); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } + + template + EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { + dst = m_expression; + } + + EIGEN_DEVICE_FUNC const internal::remove_all_t& nestedExpression() const { + return m_expression; + } + + /** Forwards the resizing request to the nested expression + * \sa 
DenseBase::resize(Index) */ + EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); } + + protected: + NestedExpressionType m_expression; +}; + +/** \class MatrixWrapper + * \ingroup Core_Module + * + * \brief Expression of an array as a mathematical vector or matrix + * + * This class is the return type of ArrayBase::matrix(), and most of the time + * this is the only way it is use. + * + * \sa MatrixBase::matrix(), class ArrayWrapper + */ + +namespace internal { +template +struct traits > : public traits > { + typedef MatrixXpr XprKind; + // Let's remove NestByRefBit + enum { + Flags0 = traits >::Flags, + LvalueBitFlag = is_lvalue::value ? LvalueBit : 0, + Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag + }; +}; +} // namespace internal + +template +class MatrixWrapper : public MatrixBase > { + public: + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) + typedef internal::remove_all_t NestedExpression; + + typedef std::conditional_t::value, Scalar, const Scalar> + ScalarWithConstIfNotLvalue; + + typedef typename internal::ref_selector::non_const_type NestedExpressionType; + + using Base::coeffRef; + + EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { + return m_expression.outerStride(); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { + return m_expression.innerStride(); + } + + EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_expression.data(); } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { + return m_expression.derived().coeffRef(rowId, colId); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } + + EIGEN_DEVICE_FUNC const internal::remove_all_t& nestedExpression() const { + return m_expression; + } + + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); } + + protected: + NestedExpressionType m_expression; +}; + +} // end namespace Eigen + +#endif // EIGEN_ARRAYWRAPPER_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Assign.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Assign.h new file mode 100644 index 0000000000000000000000000000000000000000..4b30f7bb6264b282b09fa4010f9cb7d24810c1f6 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Assign.h @@ -0,0 +1,80 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2007 Michael Olbrich +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ASSIGN_H +#define EIGEN_ASSIGN_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::lazyAssign(const DenseBase& other) { + enum { SameType = internal::is_same::value }; + + EIGEN_STATIC_ASSERT_LVALUE(Derived) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived, OtherDerived) + EIGEN_STATIC_ASSERT( + SameType, + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + eigen_assert(rows() == other.rows() && cols() == other.cols()); + internal::call_assignment_no_alias(derived(), other.derived()); + + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) { + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) { + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const MatrixBase& other) { + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const DenseBase& other) { + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const EigenBase& other) { + internal::call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=( + const ReturnByValue& other) { + other.derived().evalTo(derived()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/AssignEvaluator.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/AssignEvaluator.h new file mode 100644 index 0000000000000000000000000000000000000000..895484166fce0ced847af33ceeb73d3a4a7e6985 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/AssignEvaluator.h @@ -0,0 +1,965 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2011-2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_ASSIGN_EVALUATOR_H +#define EIGEN_ASSIGN_EVALUATOR_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +// This implementation is based on Assign.h + +namespace internal { + +/*************************************************************************** + * Part 1 : the logic deciding a strategy for traversal and unrolling * + ***************************************************************************/ + +// copy_using_evaluator_traits is based on assign_traits + +template +struct copy_using_evaluator_traits { + typedef typename DstEvaluator::XprType Dst; + typedef typename Dst::Scalar DstScalar; + + enum { DstFlags = DstEvaluator::Flags, SrcFlags = SrcEvaluator::Flags }; + + public: + enum { + DstAlignment = DstEvaluator::Alignment, + SrcAlignment = SrcEvaluator::Alignment, + DstHasDirectAccess = (DstFlags & DirectAccessBit) == DirectAccessBit, + JointAlignment = plain_enum_min(DstAlignment, SrcAlignment) + }; + + private: + enum { + InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) + : int(DstFlags) & RowMajorBit ? int(Dst::ColsAtCompileTime) + : int(Dst::RowsAtCompileTime), + InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) + : int(DstFlags) & RowMajorBit ? int(Dst::MaxColsAtCompileTime) + : int(Dst::MaxRowsAtCompileTime), + RestrictedInnerSize = min_size_prefer_fixed(InnerSize, MaxPacketSize), + RestrictedLinearSize = min_size_prefer_fixed(Dst::SizeAtCompileTime, MaxPacketSize), + OuterStride = int(outer_stride_at_compile_time::ret), + MaxSizeAtCompileTime = Dst::SizeAtCompileTime + }; + + // TODO distinguish between linear traversal and inner-traversals + typedef typename find_best_packet::type LinearPacketType; + typedef typename find_best_packet::type InnerPacketType; + + enum { + LinearPacketSize = unpacket_traits::size, + InnerPacketSize = unpacket_traits::size + }; + + public: + enum { + LinearRequiredAlignment = unpacket_traits::alignment, + InnerRequiredAlignment = unpacket_traits::alignment + }; + + private: + enum { + DstIsRowMajor = DstFlags & RowMajorBit, + SrcIsRowMajor = SrcFlags & RowMajorBit, + StorageOrdersAgree = (int(DstIsRowMajor) == int(SrcIsRowMajor)), + MightVectorize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & ActualPacketAccessBit) && + bool(functor_traits::PacketAccess), + MayInnerVectorize = MightVectorize && int(InnerSize) != Dynamic && int(InnerSize) % int(InnerPacketSize) == 0 && + int(OuterStride) != Dynamic && int(OuterStride) % int(InnerPacketSize) == 0 && + (EIGEN_UNALIGNED_VECTORIZE || int(JointAlignment) >= int(InnerRequiredAlignment)), + MayLinearize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & LinearAccessBit), + MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize) && bool(DstHasDirectAccess) && + (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment) >= int(LinearRequiredAlignment)) || + MaxSizeAtCompileTime == Dynamic), + /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, + so it's only good for large enough sizes. */ + MaySliceVectorize = bool(MightVectorize) && bool(DstHasDirectAccess) && + (int(InnerMaxSize) == Dynamic || + int(InnerMaxSize) >= (EIGEN_UNALIGNED_VECTORIZE ? 
InnerPacketSize : (3 * InnerPacketSize))) + /* slice vectorization can be slow, so we only want it if the slices are big, which is + indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block + in a fixed-size matrix + However, with EIGEN_UNALIGNED_VECTORIZE and unrolling, slice vectorization is still worth it */ + }; + + public: + enum { + Traversal = int(Dst::SizeAtCompileTime) == 0 + ? int(AllAtOnceTraversal) // If compile-size is zero, traversing will fail at compile-time. + : (int(MayLinearVectorize) && (LinearPacketSize > InnerPacketSize)) ? int(LinearVectorizedTraversal) + : int(MayInnerVectorize) ? int(InnerVectorizedTraversal) + : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) + : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) + : int(MayLinearize) ? int(LinearTraversal) + : int(DefaultTraversal), + Vectorized = int(Traversal) == InnerVectorizedTraversal || int(Traversal) == LinearVectorizedTraversal || + int(Traversal) == SliceVectorizedTraversal + }; + + typedef std::conditional_t PacketType; + + private: + enum { + ActualPacketSize = int(Traversal) == LinearVectorizedTraversal ? LinearPacketSize + : Vectorized ? InnerPacketSize + : 1, + UnrollingLimit = EIGEN_UNROLLING_LIMIT * ActualPacketSize, + MayUnrollCompletely = + int(Dst::SizeAtCompileTime) != Dynamic && + int(Dst::SizeAtCompileTime) * (int(DstEvaluator::CoeffReadCost) + int(SrcEvaluator::CoeffReadCost)) <= + int(UnrollingLimit), + MayUnrollInner = + int(InnerSize) != Dynamic && + int(InnerSize) * (int(DstEvaluator::CoeffReadCost) + int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit) + }; + + public: + enum { + Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) + ? (int(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(MayUnrollInner) ? int(InnerUnrolling) + : int(NoUnrolling)) + : int(Traversal) == int(LinearVectorizedTraversal) + ? (bool(MayUnrollCompletely) && + (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment) >= int(LinearRequiredAlignment))) + ? int(CompleteUnrolling) + : int(NoUnrolling)) + : int(Traversal) == int(LinearTraversal) + ? (bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling)) +#if EIGEN_UNALIGNED_VECTORIZE + : int(Traversal) == int(SliceVectorizedTraversal) + ? (bool(MayUnrollInner) ? 
int(InnerUnrolling) : int(NoUnrolling)) +#endif + : int(NoUnrolling) + }; + +#ifdef EIGEN_DEBUG_ASSIGN + static void debug() { + std::cerr << "DstXpr: " << typeid(typename DstEvaluator::XprType).name() << std::endl; + std::cerr << "SrcXpr: " << typeid(typename SrcEvaluator::XprType).name() << std::endl; + std::cerr.setf(std::ios::hex, std::ios::basefield); + std::cerr << "DstFlags" + << " = " << DstFlags << " (" << demangle_flags(DstFlags) << " )" << std::endl; + std::cerr << "SrcFlags" + << " = " << SrcFlags << " (" << demangle_flags(SrcFlags) << " )" << std::endl; + std::cerr.unsetf(std::ios::hex); + EIGEN_DEBUG_VAR(DstAlignment) + EIGEN_DEBUG_VAR(SrcAlignment) + EIGEN_DEBUG_VAR(LinearRequiredAlignment) + EIGEN_DEBUG_VAR(InnerRequiredAlignment) + EIGEN_DEBUG_VAR(JointAlignment) + EIGEN_DEBUG_VAR(InnerSize) + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(LinearPacketSize) + EIGEN_DEBUG_VAR(InnerPacketSize) + EIGEN_DEBUG_VAR(ActualPacketSize) + EIGEN_DEBUG_VAR(StorageOrdersAgree) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayLinearize) + EIGEN_DEBUG_VAR(MayInnerVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + std::cerr << "Traversal" + << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; + EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost) + EIGEN_DEBUG_VAR(DstEvaluator::CoeffReadCost) + EIGEN_DEBUG_VAR(Dst::SizeAtCompileTime) + EIGEN_DEBUG_VAR(UnrollingLimit) + EIGEN_DEBUG_VAR(MayUnrollCompletely) + EIGEN_DEBUG_VAR(MayUnrollInner) + std::cerr << "Unrolling" + << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl; + std::cerr << std::endl; + } +#endif +}; + +/*************************************************************************** + * Part 2 : meta-unrollers + ***************************************************************************/ + +/************************ +*** Default traversal *** +************************/ + +template +struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling { + // FIXME: this is not very clean, perhaps this information should be provided by the kernel? 
+ typedef typename Kernel::DstEvaluatorType DstEvaluatorType; + typedef typename DstEvaluatorType::XprType DstXprType; + + enum { outer = Index / DstXprType::InnerSizeAtCompileTime, inner = Index % DstXprType::InnerSizeAtCompileTime }; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + kernel.assignCoeffByOuterInner(outer, inner); + copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel&) {} +}; + +template +struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel, Index outer) { + kernel.assignCoeffByOuterInner(outer, Index_); + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); + } +}; + +template +struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) {} +}; + +/*********************** +*** Linear traversal *** +***********************/ + +template +struct copy_using_evaluator_LinearTraversal_CompleteUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + kernel.assignCoeff(Index); + copy_using_evaluator_LinearTraversal_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_LinearTraversal_CompleteUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) {} +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct copy_using_evaluator_innervec_CompleteUnrolling { + // FIXME: this is not very clean, perhaps this information should be provided by the kernel? 
+ typedef typename Kernel::DstEvaluatorType DstEvaluatorType; + typedef typename DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::PacketType PacketType; + + enum { + outer = Index / DstXprType::InnerSizeAtCompileTime, + inner = Index % DstXprType::InnerSizeAtCompileTime, + SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, + DstAlignment = Kernel::AssignmentTraits::DstAlignment + }; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + kernel.template assignPacketByOuterInner(outer, inner); + enum { NextIndex = Index + unpacket_traits::size }; + copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_innervec_CompleteUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel&) {} +}; + +template +struct copy_using_evaluator_innervec_InnerUnrolling { + typedef typename Kernel::PacketType PacketType; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel, Index outer) { + kernel.template assignPacketByOuterInner(outer, Index_); + enum { NextIndex = Index_ + unpacket_traits::size }; + copy_using_evaluator_innervec_InnerUnrolling::run(kernel, + outer); + } +}; + +template +struct copy_using_evaluator_innervec_InnerUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) {} +}; + +/*************************************************************************** + * Part 3 : implementation of all cases + ***************************************************************************/ + +// dense_assignment_loop is based on assign_impl + +template +struct dense_assignment_loop; + +/************************ +***** Special Cases ***** +************************/ + +// Zero-sized assignment is a no-op. 
+template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE EIGEN_CONSTEXPR run(Kernel& /*kernel*/) { + EIGEN_STATIC_ASSERT(int(Kernel::DstEvaluatorType::XprType::SizeAtCompileTime) == 0, + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT) + } +}; + +/************************ +*** Default traversal *** +************************/ + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel& kernel) { + for (Index outer = 0; outer < kernel.outerSize(); ++outer) { + for (Index inner = 0; inner < kernel.innerSize(); ++inner) { + kernel.assignCoeffByOuterInner(outer, inner); + } + } + } +}; + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); + } +}; + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + + const Index outerSize = kernel.outerSize(); + for (Index outer = 0; outer < outerSize; ++outer) + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, + outer); + } +}; + +/*************************** +*** Linear vectorization *** +***************************/ + +// The goal of unaligned_dense_assignment_loop is simply to factorize the handling +// of the non vectorizable beginning and ending parts + +template +struct unaligned_dense_assignment_loop { + // if IsAligned = true, then do nothing + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel&, Index, Index) {} +}; + +template <> +struct unaligned_dense_assignment_loop { + // MSVC must not inline this functions. If it does, it fails to optimize the + // packet access path. + // FIXME check which version exhibits this issue +#if EIGEN_COMP_MSVC + template + static EIGEN_DONT_INLINE void run(Kernel& kernel, Index start, Index end) +#else + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel, Index start, Index end) +#endif + { + for (Index index = start; index < end; ++index) kernel.assignCoeff(index); + } +}; + +template +struct copy_using_evaluator_linearvec_CompleteUnrolling { + // FIXME: this is not very clean, perhaps this information should be provided by the kernel? 
+ typedef typename Kernel::DstEvaluatorType DstEvaluatorType; + typedef typename DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::PacketType PacketType; + + enum { SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, DstAlignment = Kernel::AssignmentTraits::DstAlignment }; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + kernel.template assignPacket(Index); + enum { NextIndex = Index + unpacket_traits::size }; + copy_using_evaluator_linearvec_CompleteUnrolling::run(kernel); + } +}; + +template +struct copy_using_evaluator_linearvec_CompleteUnrolling { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel&) {} +}; + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) { + const Index size = kernel.size(); + typedef typename Kernel::Scalar Scalar; + typedef typename Kernel::PacketType PacketType; + enum { + requestedAlignment = Kernel::AssignmentTraits::LinearRequiredAlignment, + packetSize = unpacket_traits::size, + dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment) >= int(requestedAlignment), + dstAlignment = packet_traits::AlignedOnScalar ? int(requestedAlignment) + : int(Kernel::AssignmentTraits::DstAlignment), + srcAlignment = Kernel::AssignmentTraits::JointAlignment + }; + const Index alignedStart = + dstIsAligned ? 0 : internal::first_aligned(kernel.dstDataPtr(), size); + const Index alignedEnd = alignedStart + ((size - alignedStart) / packetSize) * packetSize; + + unaligned_dense_assignment_loop::run(kernel, 0, alignedStart); + + for (Index index = alignedStart; index < alignedEnd; index += packetSize) + kernel.template assignPacket(index); + + unaligned_dense_assignment_loop<>::run(kernel, alignedEnd, size); + } +}; + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::PacketType PacketType; + + enum { + size = DstXprType::SizeAtCompileTime, + packetSize = unpacket_traits::size, + alignedSize = (int(size) / packetSize) * packetSize + }; + + copy_using_evaluator_linearvec_CompleteUnrolling::run(kernel); + copy_using_evaluator_LinearTraversal_CompleteUnrolling::run(kernel); + } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct dense_assignment_loop { + typedef typename Kernel::PacketType PacketType; + enum { SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, DstAlignment = Kernel::AssignmentTraits::DstAlignment }; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) { + const Index innerSize = kernel.innerSize(); + const Index outerSize = kernel.outerSize(); + const Index packetSize = unpacket_traits::size; + for (Index outer = 0; outer < outerSize; ++outer) + for (Index inner = 0; inner < innerSize; inner += packetSize) + kernel.template assignPacketByOuterInner(outer, inner); + } +}; + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); + } +}; + +template +struct dense_assignment_loop { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { + typedef typename Kernel::DstEvaluatorType::XprType DstXprType; + typedef typename Kernel::AssignmentTraits 
Traits;
+    const Index outerSize = kernel.outerSize();
+    for (Index outer = 0; outer < outerSize; ++outer)
+      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, DstXprType::InnerSizeAtCompileTime,
+                                                   Traits::SrcAlignment, Traits::DstAlignment>::run(kernel, outer);
+  }
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template <typename Kernel>
+struct dense_assignment_loop<Kernel, LinearTraversal, NoUnrolling> {
+  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) {
+    const Index size = kernel.size();
+    for (Index i = 0; i < size; ++i) kernel.assignCoeff(i);
+  }
+};
+
+template <typename Kernel>
+struct dense_assignment_loop<Kernel, LinearTraversal, CompleteUnrolling> {
+  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) {
+    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
+    copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel);
+  }
+};
+
+/**************************
+*** Slice vectorization ***
+***************************/
+
+template <typename Kernel>
+struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling> {
+  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) {
+    typedef typename Kernel::Scalar Scalar;
+    typedef typename Kernel::PacketType PacketType;
+    enum {
+      packetSize = unpacket_traits<PacketType>::size,
+      requestedAlignment = int(Kernel::AssignmentTraits::InnerRequiredAlignment),
+      alignable =
+          packet_traits<Scalar>::AlignedOnScalar || int(Kernel::AssignmentTraits::DstAlignment) >= sizeof(Scalar),
+      dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment) >= int(requestedAlignment),
+      dstAlignment = alignable ? int(requestedAlignment) : int(Kernel::AssignmentTraits::DstAlignment)
+    };
+    const Scalar* dst_ptr = kernel.dstDataPtr();
+    if ((!bool(dstIsAligned)) && (std::uintptr_t(dst_ptr) % sizeof(Scalar)) > 0) {
+      // the pointer is not aligned on scalar boundaries, so alignment is not possible
+      return dense_assignment_loop<Kernel, DefaultTraversal, NoUnrolling>::run(kernel);
+    }
+    const Index packetAlignedMask = packetSize - 1;
+    const Index innerSize = kernel.innerSize();
+    const Index outerSize = kernel.outerSize();
+    const Index alignedStep = alignable ? (packetSize - kernel.outerStride() % packetSize) & packetAlignedMask : 0;
+    Index alignedStart =
+        ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned<requestedAlignment>(dst_ptr, innerSize);
+
+    for (Index outer = 0; outer < outerSize; ++outer) {
+      const Index alignedEnd = alignedStart + ((innerSize - alignedStart) & ~packetAlignedMask);
+      // do the non-vectorizable part of the assignment
+      for (Index inner = 0; inner < alignedStart; ++inner) kernel.assignCoeffByOuterInner(outer, inner);
+
+      // do the vectorizable part of the assignment
+      for (Index inner = alignedStart; inner < alignedEnd; inner += packetSize)
+        kernel.template assignPacketByOuterInner<dstAlignment, Unaligned>(outer, inner);
+
+      // do the non-vectorizable part of the assignment
+      for (Index inner = alignedEnd; inner < innerSize; ++inner) kernel.assignCoeffByOuterInner(outer, inner);
+
+      alignedStart = numext::mini((alignedStart + alignedStep) % packetSize, innerSize);
+    }
+  }
+};
+
+#if EIGEN_UNALIGNED_VECTORIZE
+template <typename Kernel>
+struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, InnerUnrolling> {
+  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel) {
+    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
+    typedef typename Kernel::PacketType PacketType;
+
+    enum {
+      innerSize = DstXprType::InnerSizeAtCompileTime,
+      packetSize = unpacket_traits<PacketType>::size,
+      vectorizableSize = (int(innerSize) / int(packetSize)) * int(packetSize),
+      size = DstXprType::SizeAtCompileTime
+    };
+
+    for (Index outer = 0; outer < kernel.outerSize(); ++outer) {
+      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, vectorizableSize, 0, 0>::run(kernel, outer);
+      copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, vectorizableSize, innerSize>::run(kernel, outer);
+    }
+  }
+};
+#endif
+
+/***************************************************************************
+ * Part 4 : Generic dense assignment kernel
+ ***************************************************************************/
+
+// This class generalizes the assignment of a coefficient (or packet) from one dense evaluator
+// to another dense writable evaluator.
+// It is parametrized by the two evaluators and the actual assignment functor.
+// This abstraction level permits keeping the evaluation loops as simple and as generic as possible.
+// One can customize the assignment using this generic dense_assignment_kernel with different
+// functors, or by completely overloading it, by-passing a functor.
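+//
+// Illustrative sketch (not part of Eigen): how the pieces above fit together for a
+// plain copy `dst = src`. The names mirror declarations in this file; user code would
+// normally go through operator=, which ends up in call_dense_assignment_loop() below.
+// Shown commented out because a header is not the place to execute it.
+//
+//   Eigen::MatrixXf src = Eigen::MatrixXf::Random(4, 4), dst(4, 4);
+//   using DstEval = Eigen::internal::evaluator<Eigen::MatrixXf>;
+//   using SrcEval = Eigen::internal::evaluator<Eigen::MatrixXf>;
+//   using Func = Eigen::internal::assign_op<float, float>;
+//   SrcEval srcEval(src);
+//   DstEval dstEval(dst);
+//   using Kernel = Eigen::internal::generic_dense_assignment_kernel<DstEval, SrcEval, Func>;
+//   Kernel kernel(dstEval, srcEval, Func(), dst);
+//   // AssignmentTraits chooses Traversal/Unrolling at compile time;
+//   // dense_assignment_loop dispatches on that choice.
+//   Eigen::internal::dense_assignment_loop<Kernel>::run(kernel);
+//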
+template <typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized>
+class generic_dense_assignment_kernel {
+ protected:
+  typedef typename DstEvaluatorTypeT::XprType DstXprType;
+  typedef typename SrcEvaluatorTypeT::XprType SrcXprType;
+
+ public:
+  typedef DstEvaluatorTypeT DstEvaluatorType;
+  typedef SrcEvaluatorTypeT SrcEvaluatorType;
+  typedef typename DstEvaluatorType::Scalar Scalar;
+  typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;
+  typedef typename AssignmentTraits::PacketType PacketType;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE generic_dense_assignment_kernel(DstEvaluatorType& dst,
+                                                                        const SrcEvaluatorType& src,
+                                                                        const Functor& func, DstXprType& dstExpr)
+      : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr) {
+#ifdef EIGEN_DEBUG_ASSIGN
+    AssignmentTraits::debug();
+#endif
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_dstExpr.size(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const EIGEN_NOEXCEPT { return m_dstExpr.innerSize(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const EIGEN_NOEXCEPT { return m_dstExpr.outerSize(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_dstExpr.rows(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_dstExpr.cols(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return m_dstExpr.outerStride(); }
+
+  EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() EIGEN_NOEXCEPT { return m_dst; }
+  EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const EIGEN_NOEXCEPT { return m_src; }
+
+  /// Assign src(row,col) to dst(row,col) through the assignment functor.
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col) {
+    m_functor.assignCoeff(m_dst.coeffRef(row, col), m_src.coeff(row, col));
+  }
+
+  /// \sa assignCoeff(Index,Index)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index) {
+    m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index));
+  }
+
+  /// \sa assignCoeff(Index,Index)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner) {
+    Index row = rowIndexByOuterInner(outer, inner);
+    Index col = colIndexByOuterInner(outer, inner);
+    assignCoeff(row, col);
+  }
+
+  template <int StoreMode, int LoadMode, typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col) {
+    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(row, col),
+                                               m_src.template packet<LoadMode, Packet>(row, col));
+  }
+
+  template <int StoreMode, int LoadMode, typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index) {
+    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(index), m_src.template packet<LoadMode, Packet>(index));
+  }
+
+  template <int StoreMode, int LoadMode, typename Packet>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner) {
+    Index row = rowIndexByOuterInner(outer, inner);
+    Index col = colIndexByOuterInner(outer, inner);
+    assignPacket<StoreMode, LoadMode, Packet>(row, col);
+  }
+
+  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) {
+    typedef typename DstEvaluatorType::ExpressionTraits Traits;
+    return int(Traits::RowsAtCompileTime) == 1          ? 0
+           : int(Traits::ColsAtCompileTime) == 1        ? inner
+           : int(DstEvaluatorType::Flags) & RowMajorBit ? outer
+                                                        : inner;
+  }
+
+  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) {
+    typedef typename DstEvaluatorType::ExpressionTraits Traits;
+    return int(Traits::ColsAtCompileTime) == 1          ? 0
+           : int(Traits::RowsAtCompileTime) == 1        ? inner
+           : int(DstEvaluatorType::Flags) & RowMajorBit ?
inner + : outer; + } + + EIGEN_DEVICE_FUNC const Scalar* dstDataPtr() const { return m_dstExpr.data(); } + + protected: + DstEvaluatorType& m_dst; + const SrcEvaluatorType& m_src; + const Functor& m_functor; + // TODO find a way to avoid the needs of the original expression + DstXprType& m_dstExpr; +}; + +// Special kernel used when computing small products whose operands have dynamic dimensions. It ensures that the +// PacketSize used is no larger than 4, thereby increasing the chance that vectorized instructions will be used +// when computing the product. + +template +class restricted_packet_dense_assignment_kernel + : public generic_dense_assignment_kernel { + protected: + typedef generic_dense_assignment_kernel Base; + + public: + typedef typename Base::Scalar Scalar; + typedef typename Base::DstXprType DstXprType; + typedef copy_using_evaluator_traits AssignmentTraits; + typedef typename AssignmentTraits::PacketType PacketType; + + EIGEN_DEVICE_FUNC restricted_packet_dense_assignment_kernel(DstEvaluatorTypeT& dst, const SrcEvaluatorTypeT& src, + const Functor& func, DstXprType& dstExpr) + : Base(dst, src, func, dstExpr) {} +}; + +/*************************************************************************** + * Part 5 : Entry point for dense rectangular assignment + ***************************************************************************/ + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed(DstXprType& dst, const SrcXprType& src, + const Functor& /*func*/) { + EIGEN_ONLY_USED_FOR_DEBUG(dst); + EIGEN_ONLY_USED_FOR_DEBUG(src); + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed(DstXprType& dst, const SrcXprType& src, + const internal::assign_op& /*func*/) { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if (((dst.rows() != dstRows) || (dst.cols() != dstCols))) dst.resize(dstRows, dstCols); + eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_dense_assignment_loop(DstXprType& dst, + const SrcXprType& src, + const Functor& func) { + typedef evaluator DstEvaluatorType; + typedef evaluator SrcEvaluatorType; + + SrcEvaluatorType srcEvaluator(src); + + // NOTE To properly handle A = (A*A.transpose())/s with A rectangular, + // we need to resize the destination after the source evaluator has been created. + resize_if_allowed(dst, src, func); + + DstEvaluatorType dstEvaluator(dst); + + typedef generic_dense_assignment_kernel Kernel; + Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); + + dense_assignment_loop::run(kernel); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src) { + call_dense_assignment_loop(dst, src, internal::assign_op()); +} + +/*************************************************************************** + * Part 6 : Generic assignment + ***************************************************************************/ + +// Based on the respective shapes of the destination and source, +// the class AssignmentKind determine the kind of assignment mechanism. +// AssignmentKind must define a Kind typedef. 
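+//
+// Illustrative sketch (not part of Eigen): how a hypothetical shape pair would plug
+// into this dispatch. `MyShape` and `MyShape2Dense` are made-up names for the example
+// only; DenseShape and the Dense2Dense specialization below are the real ones.
+//
+//   struct MyShape {};        // shape tag, exposed via evaluator_traits<Xpr>::Shape
+//   struct MyShape2Dense {};  // assignment-kind tag
+//   template <>
+//   struct AssignmentKind<DenseShape, MyShape> { typedef MyShape2Dense Kind; };
+//   // ...then partially specialize Assignment<Dst, Src, Func, MyShape2Dense>
+//   // with a static run(dst, src, func) doing the actual work.
+//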
+template <typename DstShape, typename SrcShape>
+struct AssignmentKind;
+
+// Assignment kind defined in this file:
+struct Dense2Dense {};
+struct EigenBase2EigenBase {};
+
+template <typename, typename>
+struct AssignmentKind {
+  typedef EigenBase2EigenBase Kind;
+};
+template <>
+struct AssignmentKind<DenseShape, DenseShape> {
+  typedef Dense2Dense Kind;
+};
+
+// This is the main assignment class
+template <typename DstXprType, typename SrcXprType, typename Functor,
+          typename Kind = typename AssignmentKind<typename evaluator_traits<DstXprType>::Shape,
+                                                  typename evaluator_traits<SrcXprType>::Shape>::Kind,
+          typename EnableIf = void>
+struct Assignment;
+
+// The only purpose of this call_assignment() function is to deal with noalias() / "assume-aliasing" and automatic
+// transposition. Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes things
+// quite complicated. So this intermediate function removes everything related to "assume-aliasing" such that
+// Assignment does not have to bother about these annoying details.
+
+template <typename Dst, typename Src>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(Dst& dst, const Src& src) {
+  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar, typename Src::Scalar>());
+}
+template <typename Dst, typename Src>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(const Dst& dst, const Src& src) {
+  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar, typename Src::Scalar>());
+}
+
+// Deal with "assume-aliasing"
+template <typename Dst, typename Src, typename Func>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment(
+    Dst& dst, const Src& src, const Func& func, std::enable_if_t<evaluator_assume_aliasing<Src>::value, void*> = 0) {
+  typename plain_matrix_type<Src>::type tmp(src);
+  call_assignment_no_alias(dst, tmp, func);
+}
+
+template <typename Dst, typename Src, typename Func>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(
+    Dst& dst, const Src& src, const Func& func, std::enable_if_t<!evaluator_assume_aliasing<Src>::value, void*> = 0) {
+  call_assignment_no_alias(dst, src, func);
+}
+
+// by-pass "assume-aliasing"
+// When there is no aliasing, we require that 'dst' has been properly resized
+template <typename Dst, template <typename> class StorageBase, typename Src, typename Func>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment(NoAlias<Dst, StorageBase>& dst,
+                                                                           const Src& src, const Func& func) {
+  call_assignment_no_alias(dst.expression(), src, func);
+}
+
+template <typename Dst, typename Src, typename Func>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias(Dst& dst, const Src& src,
+                                                                                    const Func& func) {
+  enum {
+    NeedToTranspose = ((int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) ||
+                       (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)) &&
+                      int(Dst::SizeAtCompileTime) != 1
+  };
+
+  typedef std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst> ActualDstTypeCleaned;
+  typedef std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst&> ActualDstType;
+  ActualDstType actualDst(dst);
+
+  // TODO check whether this is the right place to perform these checks:
+  EIGEN_STATIC_ASSERT_LVALUE(Dst)
+  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned, Src)
+  EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename ActualDstTypeCleaned::Scalar, typename Src::Scalar);
+
+  Assignment<ActualDstTypeCleaned, Src, Func>::run(actualDst, src, func);
+}
+
+template <typename Dst, typename Src, typename Func>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_restricted_packet_assignment_no_alias(Dst& dst, const Src& src,
+                                                                                      const Func& func) {
+  typedef evaluator<Dst> DstEvaluatorType;
+  typedef evaluator<Src> SrcEvaluatorType;
+  typedef restricted_packet_dense_assignment_kernel<DstEvaluatorType, SrcEvaluatorType, Func> Kernel;
+
+  EIGEN_STATIC_ASSERT_LVALUE(Dst)
+  EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename Dst::Scalar, typename Src::Scalar);
+
+  SrcEvaluatorType srcEvaluator(src);
+  resize_if_allowed(dst, src, func);
+
+  DstEvaluatorType dstEvaluator(dst);
+  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
+
+  dense_assignment_loop<Kernel>::run(kernel);
+}
+
+template <typename Dst, typename Src>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
EIGEN_CONSTEXPR void call_assignment_no_alias(Dst& dst, const Src& src) { + call_assignment_no_alias(dst, src, internal::assign_op()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias_no_transpose(Dst& dst, + const Src& src, + const Func& func) { + // TODO check whether this is the right place to perform these checks: + EIGEN_STATIC_ASSERT_LVALUE(Dst) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst, Src) + EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename Dst::Scalar, typename Src::Scalar); + + Assignment::run(dst, src, func); +} +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias_no_transpose(Dst& dst, + const Src& src) { + call_assignment_no_alias_no_transpose(dst, src, internal::assign_op()); +} + +// forward declaration +template +EIGEN_DEVICE_FUNC void check_for_aliasing(const Dst& dst, const Src& src); + +// Generic Dense to Dense assignment +// Note that the last template argument "Weak" is needed to make it possible to perform +// both partial specialization+SFINAE without ambiguous specialization +template +struct Assignment { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func) { +#ifndef EIGEN_NO_DEBUG + internal::check_for_aliasing(dst, src); +#endif + + call_dense_assignment_loop(dst, src, func); + } +}; + +template +struct Assignment, SrcPlainObject>, + assign_op, Dense2Dense, Weak> { + using Scalar = typename DstXprType::Scalar; + using NullaryOp = scalar_constant_op; + using SrcXprType = CwiseNullaryOp; + using Functor = assign_op; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const Functor& /*func*/) { + eigen_fill_impl::run(dst, src); + } +}; + +template +struct Assignment, SrcPlainObject>, + assign_op, Dense2Dense, Weak> { + using Scalar = typename DstXprType::Scalar; + using NullaryOp = scalar_zero_op; + using SrcXprType = CwiseNullaryOp; + using Functor = assign_op; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const Functor& /*func*/) { + eigen_zero_impl::run(dst, src); + } +}; + +// Generic assignment through evalTo. +// TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism. +// Note that the last template argument "Weak" is needed to make it possible to perform +// both partial specialization+SFINAE without ambiguous specialization +template +struct Assignment { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run( + DstXprType& dst, const SrcXprType& src, + const internal::assign_op& /*func*/) { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols); + + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + src.evalTo(dst); + } + + // NOTE The following two functions are templated to avoid their instantiation if not needed + // This is needed because some expressions supports evalTo only and/or have 'void' as scalar type. 
+ template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run( + DstXprType& dst, const SrcXprType& src, + const internal::add_assign_op& /*func*/) { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols); + + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + src.addTo(dst); + } + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run( + DstXprType& dst, const SrcXprType& src, + const internal::sub_assign_op& /*func*/) { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols); + + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + src.subTo(dst); + } +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_EVALUATOR_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Assign_MKL.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Assign_MKL.h new file mode 100644 index 0000000000000000000000000000000000000000..ad112200e0fd640dfcee356287e0cd242a114508 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Assign_MKL.h @@ -0,0 +1,183 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + Copyright (C) 2015 Gael Guennebaud + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin() + ******************************************************************************** +*/ + +#ifndef EIGEN_ASSIGN_VML_H +#define EIGEN_ASSIGN_VML_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +class vml_assign_traits { + private: + enum { + DstHasDirectAccess = Dst::Flags & DirectAccessBit, + SrcHasDirectAccess = Src::Flags & DirectAccessBit, + StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)), + InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) + : int(Dst::Flags) & RowMajorBit ? int(Dst::ColsAtCompileTime) + : int(Dst::RowsAtCompileTime), + InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) + : int(Dst::Flags) & RowMajorBit ? int(Dst::MaxColsAtCompileTime) + : int(Dst::MaxRowsAtCompileTime), + MaxSizeAtCompileTime = Dst::SizeAtCompileTime, + + MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess && + Src::InnerStrideAtCompileTime == 1 && Dst::InnerStrideAtCompileTime == 1, + MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit), + VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize, + LargeEnough = VmlSize == Dynamic || VmlSize >= EIGEN_MKL_VML_THRESHOLD + }; + + public: + enum { EnableVml = MightEnableVml && LargeEnough, Traversal = MightLinearize ? LinearTraversal : DefaultTraversal }; +}; + +#define EIGEN_PP_EXPAND(ARG) ARG +#if !defined(EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1) +#define EIGEN_VMLMODE_EXPAND_xLA , VML_HA +#else +#define EIGEN_VMLMODE_EXPAND_xLA , VML_LA +#endif + +#define EIGEN_VMLMODE_EXPAND_x_ + +#define EIGEN_VMLMODE_PREFIX_xLA vm +#define EIGEN_VMLMODE_PREFIX_x_ v +#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x, VMLMODE) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \ + template \ + struct Assignment, SrcXprNested>, \ + assign_op, Dense2Dense, \ + std::enable_if_t::EnableVml>> { \ + typedef CwiseUnaryOp, SrcXprNested> SrcXprType; \ + static void run(DstXprType &dst, const SrcXprType &src, const assign_op &func) { \ + resize_if_allowed(dst, src, func); \ + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \ + if (vml_assign_traits::Traversal == (int)LinearTraversal) { \ + VMLOP(dst.size(), (const VMLTYPE *)src.nestedExpression().data(), \ + (VMLTYPE *)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ + } else { \ + const Index outerSize = dst.outerSize(); \ + for (Index outer = 0; outer < outerSize; ++outer) { \ + const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer, 0)) \ + : &(src.nestedExpression().coeffRef(0, outer)); \ + EIGENTYPE *dst_ptr = dst.IsRowMajor ? 
&(dst.coeffRef(outer, 0)) : &(dst.coeffRef(0, outer)); \ + VMLOP(dst.innerSize(), (const VMLTYPE *)src_ptr, \ + (VMLTYPE *)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ + } \ + } \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), s##VMLOP), float, float, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), d##VMLOP), double, double, VMLMODE) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), c##VMLOP), scomplex, \ + MKL_Complex8, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), z##VMLOP), dcomplex, \ + MKL_Complex16, VMLMODE) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin, Sin, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin, Asin, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh, Sinh, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos, Cos, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos, Acos, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh, Cosh, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan, Tan, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan, Atan, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh, Tanh, LA) +// EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp, Exp, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log, Ln, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt, Sqrt, _) + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(cbrt, Cbrt, _) + +#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \ + template \ + struct Assignment, SrcXprNested, \ + const CwiseNullaryOp, Plain>>, \ + assign_op, Dense2Dense, \ + std::enable_if_t::EnableVml>> { \ + typedef CwiseBinaryOp, SrcXprNested, \ + const CwiseNullaryOp, Plain>> \ + SrcXprType; \ + static void run(DstXprType &dst, const SrcXprType &src, const assign_op &func) { \ + resize_if_allowed(dst, src, func); \ + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \ + VMLTYPE exponent = reinterpret_cast(src.rhs().functor().m_other); \ + if (vml_assign_traits::Traversal == LinearTraversal) { \ + VMLOP(dst.size(), (const VMLTYPE *)src.lhs().data(), exponent, \ + (VMLTYPE *)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ + } else { \ + const Index outerSize = dst.outerSize(); \ + for (Index outer = 0; outer < outerSize; ++outer) { \ + const EIGENTYPE *src_ptr = \ + src.IsRowMajor ? &(src.lhs().coeffRef(outer, 0)) : &(src.lhs().coeffRef(0, outer)); \ + EIGENTYPE *dst_ptr = dst.IsRowMajor ? 
&(dst.coeffRef(outer, 0)) : &(dst.coeffRef(0, outer)); \ + VMLOP(dst.innerSize(), (const VMLTYPE *)src_ptr, exponent, \ + (VMLTYPE *)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ + } \ + } \ + } \ + }; + +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float, float, LA) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double, double, LA) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8, LA) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_VML_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/BandMatrix.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/BandMatrix.h new file mode 100644 index 0000000000000000000000000000000000000000..ca991cad26ad2e7d1e3b5d8da54c27a1c7e899cd --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/BandMatrix.h @@ -0,0 +1,338 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BANDMATRIX_H +#define EIGEN_BANDMATRIX_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +class BandMatrixBase : public EigenBase { + public: + enum { + Flags = internal::traits::Flags, + CoeffReadCost = internal::traits::CoeffReadCost, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + Supers = internal::traits::Supers, + Subs = internal::traits::Subs, + Options = internal::traits::Options + }; + typedef typename internal::traits::Scalar Scalar; + typedef Matrix DenseMatrixType; + typedef typename DenseMatrixType::StorageIndex StorageIndex; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef EigenBase Base; + + protected: + enum { + DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic, + SizeAtCompileTime = min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime) + }; + + public: + using Base::cols; + using Base::derived; + using Base::rows; + + /** \returns the number of super diagonals */ + inline Index supers() const { return derived().supers(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return derived().subs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline const CoefficientsType& coeffs() const { return derived().coeffs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline CoefficientsType& coeffs() { return derived().coeffs(); } + + /** \returns a vector expression of the \a i -th column, + * only the meaningful part is returned. + * \warning the internal storage must be column major. 
*/ + inline Block col(Index i) { + EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + Index start = 0; + Index len = coeffs().rows(); + if (i <= supers()) { + start = supers() - i; + len = (std::min)(rows(), std::max(0, coeffs().rows() - (supers() - i))); + } else if (i >= rows() - subs()) + len = std::max(0, coeffs().rows() - (i + 1 - rows() + subs())); + return Block(coeffs(), start, i, len, 1); + } + + /** \returns a vector expression of the main diagonal */ + inline Block diagonal() { + return Block(coeffs(), supers(), 0, 1, (std::min)(rows(), cols())); + } + + /** \returns a vector expression of the main diagonal (const version) */ + inline const Block diagonal() const { + return Block(coeffs(), supers(), 0, 1, (std::min)(rows(), cols())); + } + + template + struct DiagonalIntReturnType { + enum { + ReturnOpposite = + (int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)), + Conjugate = ReturnOpposite && NumTraits::IsComplex, + ActualIndex = ReturnOpposite ? -Index : Index, + DiagonalSize = + (RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic) + ? Dynamic + : (ActualIndex < 0 ? min_size_prefer_dynamic(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) + : min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) + }; + typedef Block BuildType; + typedef std::conditional_t, BuildType>, BuildType> + Type; + }; + + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template + inline typename DiagonalIntReturnType::Type diagonal() { + return typename DiagonalIntReturnType::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1, diagonalLength(N)); + } + + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template + inline const typename DiagonalIntReturnType::Type diagonal() const { + return typename DiagonalIntReturnType::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1, diagonalLength(N)); + } + + /** \returns a vector expression of the \a i -th sub or super diagonal */ + inline Block diagonal(Index i) { + eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers())); + return Block(coeffs(), supers() - i, std::max(0, i), 1, diagonalLength(i)); + } + + /** \returns a vector expression of the \a i -th sub or super diagonal */ + inline const Block diagonal(Index i) const { + eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers())); + return Block(coeffs(), supers() - i, std::max(0, i), 1, + diagonalLength(i)); + } + + template + inline void evalTo(Dest& dst) const { + dst.resize(rows(), cols()); + dst.setZero(); + dst.diagonal() = diagonal(); + for (Index i = 1; i <= supers(); ++i) dst.diagonal(i) = diagonal(i); + for (Index i = 1; i <= subs(); ++i) dst.diagonal(-i) = diagonal(-i); + } + + DenseMatrixType toDenseMatrix() const { + DenseMatrixType res(rows(), cols()); + evalTo(res); + return res; + } + + protected: + inline Index diagonalLength(Index i) const { + return i < 0 ? (std::min)(cols(), rows() + i) : (std::min)(rows(), cols() - i); + } +}; + +/** + * \class BandMatrix + * \ingroup Core_Module + * + * \brief Represents a rectangular matrix with a banded storage + * + * \tparam Scalar_ Numeric type, i.e. 
float, double, int + * \tparam Rows_ Number of rows, or \b Dynamic + * \tparam Cols_ Number of columns, or \b Dynamic + * \tparam Supers_ Number of super diagonal + * \tparam Subs_ Number of sub diagonal + * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint + * The former controls \ref TopicStorageOrders "storage order", and defaults to + * column-major. The latter controls whether the matrix represents a selfadjoint + * matrix in which case either Supers of Subs have to be null. + * + * \sa class TridiagonalMatrix + */ + +template +struct traits > { + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef Eigen::Index StorageIndex; + enum { + CoeffReadCost = NumTraits::ReadCost, + RowsAtCompileTime = Rows_, + ColsAtCompileTime = Cols_, + MaxRowsAtCompileTime = Rows_, + MaxColsAtCompileTime = Cols_, + Flags = LvalueBit, + Supers = Supers_, + Subs = Subs_, + Options = Options_, + DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic + }; + typedef Matrix + CoefficientsType; +}; + +template +class BandMatrix : public BandMatrixBase > { + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageIndex StorageIndex; + typedef typename internal::traits::CoefficientsType CoefficientsType; + + explicit inline BandMatrix(Index rows = Rows, Index cols = Cols, Index supers = Supers, Index subs = Subs) + : m_coeffs(1 + supers + subs, cols), m_rows(rows), m_supers(supers), m_subs(subs) {} + + /** \returns the number of columns */ + inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + inline CoefficientsType& coeffs() { return m_coeffs; } + + protected: + CoefficientsType m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +template +class BandMatrixWrapper; + +template +struct traits > { + typedef typename CoefficientsType_::Scalar Scalar; + typedef typename CoefficientsType_::StorageKind StorageKind; + typedef typename CoefficientsType_::StorageIndex StorageIndex; + enum { + CoeffReadCost = internal::traits::CoeffReadCost, + RowsAtCompileTime = Rows_, + ColsAtCompileTime = Cols_, + MaxRowsAtCompileTime = Rows_, + MaxColsAtCompileTime = Cols_, + Flags = LvalueBit, + Supers = Supers_, + Subs = Subs_, + Options = Options_, + DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 
1 + Supers + Subs : Dynamic + }; + typedef CoefficientsType_ CoefficientsType; +}; + +template +class BandMatrixWrapper + : public BandMatrixBase > { + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef typename internal::traits::StorageIndex StorageIndex; + + explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows = Rows_, Index cols = Cols_, + Index supers = Supers_, Index subs = Subs_) + : m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) { + EIGEN_UNUSED_VARIABLE(cols); + // eigen_assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); + } + + /** \returns the number of columns */ + inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + + protected: + const CoefficientsType& m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +/** + * \class TridiagonalMatrix + * \ingroup Core_Module + * + * \brief Represents a tridiagonal matrix with a compact banded storage + * + * \tparam Scalar Numeric type, i.e. float, double, int + * \tparam Size Number of rows and cols, or \b Dynamic + * \tparam Options Can be 0 or \b SelfAdjoint + * + * \sa class BandMatrix + */ +template +class TridiagonalMatrix : public BandMatrix { + typedef BandMatrix Base; + typedef typename Base::StorageIndex StorageIndex; + + public: + explicit TridiagonalMatrix(Index size = Size) : Base(size, size, Options & SelfAdjoint ? 0 : 1, 1) {} + + inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); } + inline const typename Base::template DiagonalIntReturnType<1>::Type super() const { + return Base::template diagonal<1>(); + } + inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); } + inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const { + return Base::template diagonal<-1>(); + } + + protected: +}; + +struct BandShape {}; + +template +struct evaluator_traits > + : public evaluator_traits_base > { + typedef BandShape Shape; +}; + +template +struct evaluator_traits > + : public evaluator_traits_base > { + typedef BandShape Shape; +}; + +template <> +struct AssignmentKind { + typedef EigenBase2EigenBase Kind; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BANDMATRIX_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Block.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Block.h new file mode 100644 index 0000000000000000000000000000000000000000..709264c69990545d58ea6bb7912bc8426eeb57c3 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Block.h @@ -0,0 +1,439 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BLOCK_H +#define EIGEN_BLOCK_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits> : traits { + typedef typename traits::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ref_selector::type XprTypeNested; + typedef std::remove_reference_t XprTypeNested_; + enum { + MatrixRows = traits::RowsAtCompileTime, + MatrixCols = traits::ColsAtCompileTime, + RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows, + ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, + MaxRowsAtCompileTime = BlockRows == 0 ? 0 + : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) + : int(traits::MaxRowsAtCompileTime), + MaxColsAtCompileTime = BlockCols == 0 ? 0 + : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) + : int(traits::MaxColsAtCompileTime), + + XprTypeIsRowMajor = (int(traits::Flags) & RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? 1 + : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0 + : XprTypeIsRowMajor, + HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), + InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), + OuterStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + + // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, + Flags = (traits::Flags & (DirectAccessBit | (InnerPanel_ ? CompressedAccessBit : 0))) | FlagsLvalueBit | + FlagsRowMajorBit, + // FIXME DirectAccessBit should not be handled by expressions + // + // Alignment is needed by MapBase's assertions + // We can sefely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the + // respective evaluator + Alignment = 0, + InnerPanel = InnerPanel_ ? 1 : 0 + }; +}; + +template ::ret> +class BlockImpl_dense; + +} // end namespace internal + +template +class BlockImpl; + +/** \class Block + * \ingroup Core_Module + * + * \brief Expression of a fixed-size or dynamic-size block + * + * \tparam XprType the type of the expression in which we are taking a block + * \tparam BlockRows the number of rows of the block we are taking at compile time (optional) + * \tparam BlockCols the number of columns of the block we are taking at compile time (optional) + * \tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or + * to set of columns of a column major matrix (optional). The parameter allows to determine + * at compile time whether aligned access is possible on the block expression. + * + * This class represents an expression of either a fixed-size or dynamic-size block. It is the return + * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block(Index,Index) and + * most of the time this is the only way it is used. + * + * However, if you want to directly manipulate block expressions, + * for instance if you want to write a function returning such an expression, you + * will need to use this class. 
+ * + * Here is an example illustrating the dynamic case: + * \include class_Block.cpp + * Output: \verbinclude class_Block.out + * + * \note Even though this expression has dynamic size, in the case where \a XprType + * has fixed size, this expression inherits a fixed maximal size which means that evaluating + * it does not cause a dynamic memory allocation. + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedBlock.cpp + * Output: \verbinclude class_FixedBlock.out + * + * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock + */ +template +class Block + : public BlockImpl::StorageKind> { + typedef BlockImpl::StorageKind> Impl; + using BlockHelper = internal::block_xpr_helper; + + public: + // typedef typename Impl::Base Base; + typedef Impl Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Block) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) + + typedef internal::remove_all_t NestedExpression; + + /** Column or Row constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index i) : Impl(xpr, i) { + eigen_assert((i >= 0) && (((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) && i < xpr.rows()) || + ((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) && i < xpr.cols()))); + } + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol) + : Impl(xpr, startRow, startCol) { + EIGEN_STATIC_ASSERT(RowsAtCompileTime != Dynamic && ColsAtCompileTime != Dynamic, + THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) + eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() && startCol >= 0 && + BlockCols >= 0 && startCol + BlockCols <= xpr.cols()); + } + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol, Index blockRows, + Index blockCols) + : Impl(xpr, startRow, startCol, blockRows, blockCols) { + eigen_assert((RowsAtCompileTime == Dynamic || RowsAtCompileTime == blockRows) && + (ColsAtCompileTime == Dynamic || ColsAtCompileTime == blockCols)); + eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows && startCol >= 0 && + blockCols >= 0 && startCol <= xpr.cols() - blockCols); + } + + // convert nested blocks (e.g. Block>) to a simple block expression (Block) + + using ConstUnwindReturnType = Block; + using UnwindReturnType = Block; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ConstUnwindReturnType unwind() const { + return ConstUnwindReturnType(BlockHelper::base(*this), BlockHelper::row(*this, 0), BlockHelper::col(*this, 0), + this->rows(), this->cols()); + } + + template ::value>> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UnwindReturnType unwind() { + return UnwindReturnType(BlockHelper::base(*this), BlockHelper::row(*this, 0), BlockHelper::col(*this, 0), + this->rows(), this->cols()); + } +}; + +// The generic default implementation for dense block simply forward to the internal::BlockImpl_dense +// that must be specialized for direct and non-direct access... 
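+//
+// Illustrative usage sketch (not part of this header): the public entry points that
+// build these Block expressions, assuming <Eigen/Dense> is included.
+//
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(6, 6);
+//   // dynamic-size block: a 2x3 block whose top-left corner is at (1, 2)
+//   Eigen::Block<Eigen::MatrixXd> b = m.block(1, 2, 2, 3);
+//   // fixed-size block: the same region, with sizes known at compile time
+//   auto fb = m.block<2, 3>(1, 2);
+//   fb.setZero();  // a Block is an lvalue expression: this writes through to m
+//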
+template +class BlockImpl + : public internal::BlockImpl_dense { + typedef internal::BlockImpl_dense Impl; + typedef typename XprType::StorageIndex StorageIndex; + + public: + typedef Impl Base; + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr, i) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol) + : Impl(xpr, startRow, startCol) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, + Index blockCols) + : Impl(xpr, startRow, startCol, blockRows, blockCols) {} +}; + +namespace internal { + +/** \internal Internal implementation of dense Blocks in the general case. */ +template +class BlockImpl_dense : public internal::dense_xpr_base>::type { + typedef Block BlockType; + typedef typename internal::ref_selector::non_const_type XprTypeNested; + + public: + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) + + // class InnerIterator; // FIXME apparently never used + + /** Column or Row constructor + */ + EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i) + : m_xpr(xpr), + // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, + // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, + // all other cases are invalid. + // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. + m_startRow((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) ? i : 0), + m_startCol((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) ? i : 0), + m_blockRows(BlockRows == 1 ? 1 : xpr.rows()), + m_blockCols(BlockCols == 1 ? 1 : xpr.cols()) {} + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) + : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) {} + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, + Index blockCols) + : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {} + + EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { + return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const { + return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { + return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); + } + + EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { + return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + EIGEN_DEVICE_FUNC inline PacketScalar packet(Index rowId, Index colId) const { + return m_xpr.template packet(rowId + m_startRow.value(), colId + m_startCol.value()); + } + + template + EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { + m_xpr.template writePacket(rowId + m_startRow.value(), colId + m_startCol.value(), val); + } + + template + EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const { + return m_xpr.template packet(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val) { + m_xpr.template writePacket(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val); + } + +#ifdef EIGEN_PARSED_BY_DOXYGEN + /** \sa MapBase::data() */ + EIGEN_DEVICE_FUNC constexpr const Scalar* data() const; + EIGEN_DEVICE_FUNC inline Index innerStride() const; + EIGEN_DEVICE_FUNC inline Index outerStride() const; +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t& nestedExpression() const { + return m_xpr; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startRow() const EIGEN_NOEXCEPT { + return m_startRow.value(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startCol() const EIGEN_NOEXCEPT { + return m_startCol.value(); + } + + protected: + XprTypeNested m_xpr; + const internal::variable_if_dynamic + m_startRow; + const internal::variable_if_dynamic + m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; +}; + +/** \internal Internal implementation of dense Blocks in the direct access case.*/ +template +class BlockImpl_dense + : public MapBase> { + typedef Block BlockType; + typedef typename internal::ref_selector::non_const_type XprTypeNested; + enum { XprTypeIsRowMajor = (int(traits::Flags) & RowMajorBit) != 0 }; + + /** \internal Returns base+offset (unless base is null, in which case returns null). + * Adding an offset to nullptr is undefined behavior, so we must avoid it. + */ + template + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE static Scalar* add_to_nullable_pointer(Scalar* base, + Index offset) { + return base != nullptr ? base + offset : nullptr; + } + + public: + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) + + /** Column or Row constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index i) + : Base((BlockRows == 0 || BlockCols == 0) + ? nullptr + : add_to_nullable_pointer( + xpr.data(), + i * (((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) || + ((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) && + (XprTypeIsRowMajor)) + ? xpr.innerStride() + : xpr.outerStride())), + BlockRows == 1 ? 1 : xpr.rows(), BlockCols == 1 ? 1 : xpr.cols()), + m_xpr(xpr), + m_startRow((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) ? 
i : 0), + m_startCol((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) ? i : 0) { + init(); + } + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) + : Base((BlockRows == 0 || BlockCols == 0) + ? nullptr + : add_to_nullable_pointer(xpr.data(), + xpr.innerStride() * (XprTypeIsRowMajor ? startCol : startRow) + + xpr.outerStride() * (XprTypeIsRowMajor ? startRow : startCol))), + m_xpr(xpr), + m_startRow(startRow), + m_startCol(startCol) { + init(); + } + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, + Index blockCols) + : Base((blockRows == 0 || blockCols == 0) + ? nullptr + : add_to_nullable_pointer(xpr.data(), + xpr.innerStride() * (XprTypeIsRowMajor ? startCol : startRow) + + xpr.outerStride() * (XprTypeIsRowMajor ? startRow : startCol)), + blockRows, blockCols), + m_xpr(xpr), + m_startRow(startRow), + m_startCol(startCol) { + init(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t& nestedExpression() const + EIGEN_NOEXCEPT { + return m_xpr; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; } + + /** \sa MapBase::innerStride() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index innerStride() const EIGEN_NOEXCEPT { + return internal::traits::HasSameStorageOrderAsXprType ? m_xpr.innerStride() : m_xpr.outerStride(); + } + + /** \sa MapBase::outerStride() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { + return internal::traits::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startRow() const EIGEN_NOEXCEPT { + return m_startRow.value(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startCol() const EIGEN_NOEXCEPT { + return m_startCol.value(); + } + +#ifndef __SUNPRO_CC + // FIXME sunstudio is not friendly with the above friend... + // META-FIXME there is no 'friend' keyword around here. Is this obsolete? + protected: +#endif + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal used by allowAligned() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, + Index blockCols) + : Base(data, blockRows, blockCols), m_xpr(xpr) { + init(); + } +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void init() { + m_outerStride = + internal::traits::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride(); + } + + XprTypeNested m_xpr; + const internal::variable_if_dynamic + m_startRow; + const internal::variable_if_dynamic + m_startCol; + Index m_outerStride; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BLOCK_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CommaInitializer.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CommaInitializer.h new file mode 100644 index 0000000000000000000000000000000000000000..c6291234b1bd48ae75d8164c5e696560e2b0d384 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CommaInitializer.h @@ -0,0 +1,149 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMMAINITIALIZER_H +#define EIGEN_COMMAINITIALIZER_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class CommaInitializer + * \ingroup Core_Module + * + * \brief Helper class used by the comma initializer operator + * + * This class is internally used to implement the comma initializer feature. It is + * the return type of MatrixBase::operator<<, and most of the time this is the only + * way it is used. + * + * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() + */ +template +struct CommaInitializer { + typedef typename XprType::Scalar Scalar; + + EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s) + : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) { + eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0 && "Cannot comma-initialize a 0x0 matrix (operator<<)"); + m_xpr.coeffRef(0, 0) = s; + } + + template + EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase& other) + : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) { + eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols() && + "Cannot comma-initialize a 0x0 matrix (operator<<)"); + m_xpr.template block(0, 0, other.rows(), + other.cols()) = other; + } + + /* Copy/Move constructor which transfers ownership. This is crucial in + * absence of return value optimization to avoid assertions during destruction. */ + // FIXME in C++11 mode this could be replaced by a proper RValue constructor + EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o) + : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { + // Mark original object as finished. 
In absence of R-value references we need to const_cast:
+    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
+    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
+    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
+  }
+
+  /* inserts a scalar value in the target matrix */
+  EIGEN_DEVICE_FUNC CommaInitializer& operator,(const Scalar& s) {
+    if (m_col == m_xpr.cols()) {
+      m_row += m_currentBlockRows;
+      m_col = 0;
+      m_currentBlockRows = 1;
+      eigen_assert(m_row < m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)");
+    }
+    eigen_assert(m_col < m_xpr.cols() && "Too many coefficients passed to comma initializer (operator<<)");
+    eigen_assert(m_currentBlockRows == 1);
+    m_xpr.coeffRef(m_row, m_col++) = s;
+    return *this;
+  }
+
+  /* inserts a matrix expression in the target matrix */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC CommaInitializer& operator,(const DenseBase<OtherDerived>& other) {
+    if (m_col == m_xpr.cols() && (other.cols() != 0 || other.rows() != m_currentBlockRows)) {
+      m_row += m_currentBlockRows;
+      m_col = 0;
+      m_currentBlockRows = other.rows();
+      eigen_assert(m_row + m_currentBlockRows <= m_xpr.rows() &&
+                   "Too many rows passed to comma initializer (operator<<)");
+    }
+    eigen_assert((m_col + other.cols() <= m_xpr.cols()) &&
+                 "Too many coefficients passed to comma initializer (operator<<)");
+    eigen_assert(m_currentBlockRows == other.rows());
+    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(m_row, m_col, other.rows(),
+                                                                                           other.cols()) = other;
+    m_col += other.cols();
+    return *this;
+  }
+
+  EIGEN_DEVICE_FUNC inline ~CommaInitializer()
+#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
+      EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)
+#endif
+  {
+    finished();
+  }
+
+  /** \returns the built matrix once all its coefficients have been set.
+   * Calling finished is 100% optional. Its purpose is to write expressions
+   * like this:
+   * \code
+   * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
+   * \endcode
+   */
+  EIGEN_DEVICE_FUNC inline XprType& finished() {
+    eigen_assert(((m_row + m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() &&
+                 "Too few coefficients passed to comma initializer (operator<<)");
+    return m_xpr;
+  }
+
+  XprType& m_xpr;            // target expression
+  Index m_row;               // current row id
+  Index m_col;               // current col id
+  Index m_currentBlockRows;  // current block height
+};
+
+/** \anchor MatrixBaseCommaInitRef
+ * Convenient operator to set the coefficients of a matrix.
+ *
+ * The coefficients must be provided in row-major order and exactly match
+ * the size of the matrix. Otherwise an assertion is raised.
+ *
+ * Example: \include MatrixBase_set.cpp
+ * Output: \verbinclude MatrixBase_set.out
+ *
+ * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary
+ * order.
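A short usage sketch of the comma initializer documented above (standard public Eigen API, shown here for context):

#include <Eigen/Dense>

int main() {
  // Coefficients are consumed in row-major order and must match the matrix
  // size exactly, otherwise the assertions above fire.
  Eigen::Matrix3f m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  // Matrix expressions can be mixed in; each item fills a block and sets
  // the current block height (m_currentBlockRows above).
  Eigen::Matrix2f a = Eigen::Matrix2f::Identity();
  Eigen::MatrixXf joined(2, 4);
  joined << a, 2 * a;

  // finished() hands back the filled matrix so the whole initializer can
  // be used as a sub-expression.
  Eigen::Vector3f v = (Eigen::Vector3f() << 1, 2, 3).finished();
  static_cast<void>(m);
  static_cast<void>(v);
}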
+ * + * \sa CommaInitializer::finished(), class CommaInitializer + */ +template +EIGEN_DEVICE_FUNC inline CommaInitializer DenseBase::operator<<(const Scalar& s) { + return CommaInitializer(*static_cast(this), s); +} + +/** \sa operator<<(const Scalar&) */ +template +template +EIGEN_DEVICE_FUNC inline CommaInitializer DenseBase::operator<<( + const DenseBase& other) { + return CommaInitializer(*static_cast(this), other); +} + +} // end namespace Eigen + +#endif // EIGEN_COMMAINITIALIZER_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ConditionEstimator.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ConditionEstimator.h new file mode 100644 index 0000000000000000000000000000000000000000..dd1770b1abcab1e0b2f59f692da2da97f6e71089 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ConditionEstimator.h @@ -0,0 +1,173 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONDITIONESTIMATOR_H +#define EIGEN_CONDITIONESTIMATOR_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct rcond_compute_sign { + static inline Vector run(const Vector& v) { + const RealVector v_abs = v.cwiseAbs(); + return (v_abs.array() == static_cast(0)) + .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); + } +}; + +// Partial specialization to avoid elementwise division for real vectors. +template +struct rcond_compute_sign { + static inline Vector run(const Vector& v) { + return (v.array() < static_cast(0)) + .select(-Vector::Ones(v.size()), Vector::Ones(v.size())); + } +}; + +/** + * \returns an estimate of ||inv(matrix)||_1 given a decomposition of + * \a matrix that implements .solve() and .adjoint().solve() methods. + * + * This function implements Algorithms 4.1 and 5.1 from + * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf + * which also forms the basis for the condition number estimators in + * LAPACK. Since at most 10 calls to the solve method of dec are + * performed, the total cost is O(dims^2), as opposed to O(dims^3) + * needed to compute the inverse matrix explicitly. + * + * The most common usage is in estimating the condition number + * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be + * computed directly in O(n^2) operations. + * + * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and + * LLT. + * + * \sa FullPivLU, PartialPivLU, LDLT, LLT. 
+ */ +template +typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) { + typedef typename Decomposition::MatrixType MatrixType; + typedef typename Decomposition::Scalar Scalar; + typedef typename Decomposition::RealScalar RealScalar; + typedef typename internal::plain_col_type::type Vector; + typedef typename internal::plain_col_type::type RealVector; + const bool is_complex = (NumTraits::IsComplex != 0); + + eigen_assert(dec.rows() == dec.cols()); + const Index n = dec.rows(); + if (n == 0) return 0; + + // Disable Index to float conversion warning +#ifdef __INTEL_COMPILER +#pragma warning push +#pragma warning(disable : 2259) +#endif + Vector v = dec.solve(Vector::Ones(n) / Scalar(n)); +#ifdef __INTEL_COMPILER +#pragma warning pop +#endif + + // lower_bound is a lower bound on + // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 + // and is the objective maximized by the ("super-") gradient ascent + // algorithm below. + RealScalar lower_bound = v.template lpNorm<1>(); + if (n == 1) return lower_bound; + + // Gradient ascent algorithm follows: We know that the optimum is achieved at + // one of the simplices v = e_i, so in each iteration we follow a + // super-gradient to move towards the optimal one. + RealScalar old_lower_bound = lower_bound; + Vector sign_vector(n); + Vector old_sign_vector; + Index v_max_abs_index = -1; + Index old_v_max_abs_index = v_max_abs_index; + for (int k = 0; k < 4; ++k) { + sign_vector = internal::rcond_compute_sign::run(v); + if (k > 0 && !is_complex && sign_vector == old_sign_vector) { + // Break if the solution stagnated. + break; + } + // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| + v = dec.adjoint().solve(sign_vector); + v.real().cwiseAbs().maxCoeff(&v_max_abs_index); + if (v_max_abs_index == old_v_max_abs_index) { + // Break if the solution stagnated. + break; + } + // Move to the new simplex e_j, where j = v_max_abs_index. + v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j. + lower_bound = v.template lpNorm<1>(); + if (lower_bound <= old_lower_bound) { + // Break if the gradient step did not increase the lower_bound. + break; + } + if (!is_complex) { + old_sign_vector = sign_vector; + } + old_v_max_abs_index = v_max_abs_index; + old_lower_bound = lower_bound; + } + // The following calculates an independent estimate of ||matrix||_1 by + // multiplying matrix by a vector with entries of slowly increasing + // magnitude and alternating sign: + // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. + // This improvement to Hager's algorithm above is due to Higham. It was + // added to make the algorithm more robust in certain corner cases where + // large elements in the matrix might otherwise escape detection due to + // exact cancellation (especially when op and op_adjoint correspond to a + // sequence of backsubstitutions and permutations), which could cause + // Hager's algorithm to vastly underestimate ||matrix||_1. + Scalar alternating_sign(RealScalar(1)); + for (Index i = 0; i < n; ++i) { + // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates + v[i] = alternating_sign * static_cast(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1)))); + alternating_sign = -alternating_sign; + } + v = dec.solve(v); + const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n)); + return numext::maxi(lower_bound, alternate_lower_bound); +} + +/** \brief Reciprocal condition number estimator. 
+ * + * Computing a decomposition of a dense matrix takes O(n^3) operations, while + * this method estimates the condition number quickly and reliably in O(n^2) + * operations. + * + * \returns an estimate of the reciprocal condition number + * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and + * its decomposition. Supports the following decompositions: FullPivLU, + * PartialPivLU, LDLT, and LLT. + * + * \sa FullPivLU, PartialPivLU, LDLT, LLT. + */ +template +typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, + const Decomposition& dec) { + typedef typename Decomposition::RealScalar RealScalar; + eigen_assert(dec.rows() == dec.cols()); + if (dec.rows() == 0) return NumTraits::infinity(); + if (numext::is_exactly_zero(matrix_norm)) return RealScalar(0); + if (dec.rows() == 1) return RealScalar(1); + const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec); + return (numext::is_exactly_zero(inverse_matrix_norm) ? RealScalar(0) + : (RealScalar(1) / inverse_matrix_norm) / matrix_norm); +} + +} // namespace internal + +} // namespace Eigen + +#endif diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CoreEvaluators.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CoreEvaluators.h new file mode 100644 index 0000000000000000000000000000000000000000..156ca2b6073576f23d7d87e527ea5fa367fb85e0 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CoreEvaluators.h @@ -0,0 +1,1688 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2011-2012 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COREEVALUATORS_H +#define EIGEN_COREEVALUATORS_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +// This class returns the evaluator kind from the expression storage kind. +// Default assumes index based accessors +template +struct storage_kind_to_evaluator_kind { + typedef IndexBased Kind; +}; + +// This class returns the evaluator shape from the expression storage kind. +// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc. +template +struct storage_kind_to_shape; + +template <> +struct storage_kind_to_shape { + typedef DenseShape Shape; +}; +template <> +struct storage_kind_to_shape { + typedef SolverShape Shape; +}; +template <> +struct storage_kind_to_shape { + typedef PermutationShape Shape; +}; +template <> +struct storage_kind_to_shape { + typedef TranspositionsShape Shape; +}; + +// Evaluators have to be specialized with respect to various criteria such as: +// - storage/structure/shape +// - scalar type +// - etc. +// Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators. 
+// We currently distinguish the following kind of evaluators: +// - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, +// MatrixWrapper, ArrayWrapper, Reverse, Replicate) +// - binary_evaluator for expression taking two arguments (CwiseBinaryOp) +// - ternary_evaluator for expression taking three arguments (CwiseTernaryOp) +// - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires +// additional tags for dispatching. +// - mapbase_evaluator for Map, Block, Ref +// - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator) + +template ::Kind, + typename Arg2Kind = typename evaluator_traits::Kind, + typename Arg3Kind = typename evaluator_traits::Kind, + typename Arg1Scalar = typename traits::Scalar, + typename Arg2Scalar = typename traits::Scalar, + typename Arg3Scalar = typename traits::Scalar> +struct ternary_evaluator; + +template ::Kind, + typename RhsKind = typename evaluator_traits::Kind, + typename LhsScalar = typename traits::Scalar, + typename RhsScalar = typename traits::Scalar> +struct binary_evaluator; + +template ::Kind, + typename Scalar = typename T::Scalar> +struct unary_evaluator; + +// evaluator_traits contains traits for evaluator + +template +struct evaluator_traits_base { + // by default, get evaluator kind and shape from storage + typedef typename storage_kind_to_evaluator_kind::StorageKind>::Kind Kind; + typedef typename storage_kind_to_shape::StorageKind>::Shape Shape; +}; + +// Default evaluator traits +template +struct evaluator_traits : public evaluator_traits_base {}; + +template ::Shape> +struct evaluator_assume_aliasing { + static const bool value = false; +}; + +// By default, we assume a unary expression: +template +struct evaluator : public unary_evaluator { + typedef unary_evaluator Base; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const T& xpr) : Base(xpr) {} +}; + +// TODO: Think about const-correctness +template +struct evaluator : evaluator { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const T& xpr) : evaluator(xpr) {} +}; + +// ---------- base class for all evaluators ---------- + +template +struct evaluator_base { + // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle + // outer,inner indices. + typedef traits ExpressionTraits; + + enum { Alignment = 0 }; + // noncopyable: + // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization) + // and make complex evaluator much larger than then should do. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator_base() = default; + + private: + EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&); + EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&); +}; + +// -------------------- Matrix and Array -------------------- +// +// evaluator is a common base class for the +// Matrix and Array evaluators. +// Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense, +// so no need for more sophisticated dispatching. + +// this helper permits to completely eliminate m_outerStride if it is known at compiletime. 
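To make the evaluator dispatch above concrete, here is a small sketch of reading an expression through its evaluator. Note that internal::evaluator is an internal, version-dependent API, so this is illustrative only:

#include <Eigen/Dense>
#include <iostream>

int main() {
  using namespace Eigen;
  MatrixXd a = MatrixXd::Random(3, 3);
  MatrixXd b = MatrixXd::Random(3, 3);

  // a + b builds a lightweight CwiseBinaryOp node; no arithmetic happens
  // until something evaluates it.
  auto expr = a + b;

  // Assignment kernels are that something: they instantiate an evaluator
  // for the expression tree and pull coefficients (or packets) from it.
  internal::evaluator<decltype(expr)> eval(expr);
  std::cout << eval.coeff(0, 0) << " == " << a(0, 0) + b(0, 0) << "\n";
}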
+template +class plainobjectbase_evaluator_data { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) + : data(ptr) { +#ifndef EIGEN_INTERNAL_DEBUGGING + EIGEN_UNUSED_VARIABLE(outerStride); +#endif + eigen_internal_assert(outerStride == OuterStride); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; } + const Scalar* data; +}; + +template +class plainobjectbase_evaluator_data { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) + : data(ptr), m_outerStride(outerStride) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index outerStride() const { return m_outerStride; } + const Scalar* data; + + protected: + Index m_outerStride; +}; + +template +struct evaluator> : evaluator_base { + typedef PlainObjectBase PlainObjectType; + typedef typename PlainObjectType::Scalar Scalar; + typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; + + enum { + IsRowMajor = PlainObjectType::IsRowMajor, + IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime, + RowsAtCompileTime = PlainObjectType::RowsAtCompileTime, + ColsAtCompileTime = PlainObjectType::ColsAtCompileTime, + + CoeffReadCost = NumTraits::ReadCost, + Flags = traits::EvaluatorFlags, + Alignment = traits::Alignment + }; + enum { + // We do not need to know the outer stride for vectors + OuterStrideAtCompileTime = IsVectorAtCompileTime ? 0 + : int(IsRowMajor) ? ColsAtCompileTime + : RowsAtCompileTime + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator() : m_d(0, OuterStrideAtCompileTime) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr explicit evaluator(const PlainObjectType& m) + : m_d(m.data(), IsVectorAtCompileTime ? 
0 : m.outerStride()) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeff(Index row, Index col) const { + if (IsRowMajor) + return m_d.data[row * m_d.outerStride() + col]; + else + return m_d.data[row + col * m_d.outerStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeff(Index index) const { return m_d.data[index]; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index row, Index col) { + if (IsRowMajor) + return const_cast(m_d.data)[row * m_d.outerStride() + col]; + else + return const_cast(m_d.data)[row + col * m_d.outerStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index index) { + return const_cast(m_d.data)[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + if (IsRowMajor) + return ploadt(m_d.data + row * m_d.outerStride() + col); + else + return ploadt(m_d.data + row + col * m_d.outerStride()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return ploadt(m_d.data + index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { + if (IsRowMajor) + return pstoret(const_cast(m_d.data) + row * m_d.outerStride() + col, x); + else + return pstoret(const_cast(m_d.data) + row + col * m_d.outerStride(), x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { + return pstoret(const_cast(m_d.data) + index, x); + } + + protected: + plainobjectbase_evaluator_data m_d; +}; + +template +struct evaluator> + : evaluator>> { + typedef Matrix XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator() = default; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr explicit evaluator(const XprType& m) + : evaluator>(m) {} +}; + +template +struct evaluator> + : evaluator>> { + typedef Array XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr evaluator() = default; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr explicit evaluator(const XprType& m) + : evaluator>(m) {} +}; + +// -------------------- Transpose -------------------- + +template +struct unary_evaluator, IndexBased> : evaluator_base> { + typedef Transpose XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = evaluator::Flags ^ RowMajorBit, + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_argImpl.coeff(col, row); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(col, row); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename XprType::Scalar& coeffRef(Index index) { + return m_argImpl.coeffRef(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return m_argImpl.template packet(col, row); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return m_argImpl.template packet(index); + } + + template + 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { + m_argImpl.template writePacket(col, row, x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { + m_argImpl.template writePacket(index, x); + } + + protected: + evaluator m_argImpl; +}; + +// -------------------- CwiseNullaryOp -------------------- +// Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator. +// Likewise, there is not need to more sophisticated dispatching here. + +template ::value, + bool has_unary = has_unary_operator::value, + bool has_binary = has_binary_operator::value> +struct nullary_wrapper { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { + return op(i, j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { + return op(i); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { + return op.template packetOp(i, j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { + return op.template packetOp(i); + } +}; + +template +struct nullary_wrapper { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType = 0, IndexType = 0) const { + return op(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType = 0, IndexType = 0) const { + return op.template packetOp(); + } +}; + +template +struct nullary_wrapper { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j = 0) const { + return op(i, j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j = 0) const { + return op.template packetOp(i, j); + } +}; + +// We need the following specialization for vector-only functors assigned to a runtime vector, +// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd. +// In this case, i==0 and j is used for the actual iteration. +template +struct nullary_wrapper { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { + eigen_assert(i == 0 || j == 0); + return op(i + j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { + eigen_assert(i == 0 || j == 0); + return op.template packetOp(i + j); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { + return op(i); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { + return op.template packetOp(i); + } +}; + +template +struct nullary_wrapper {}; + +#if 0 && EIGEN_COMP_MSVC > 0 +// Disable this ugly workaround. This is now handled in traits::match, +// but this piece of code might still become handly if some other weird compilation +// errors pop up again. + +// MSVC exhibits a weird compilation error when +// compiling: +// Eigen::MatrixXf A = MatrixXf::Random(3,3); +// Ref R = 2.f*A; +// and that has_*ary_operator> have not been instantiated yet. 
+// The "problem" is that evaluator<2.f*A> is instantiated by traits::match<2.f*A> +// and at that time has_*ary_operator returns true regardless of T. +// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>. +// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(), +// and packet() are really instantiated as implemented below: + +// This is a simple wrapper around Index to enforce the re-instantiation of +// has_*ary_operator when needed. +template struct nullary_wrapper_workaround_msvc { + nullary_wrapper_workaround_msvc(const T&); + operator T()const; +}; + +template +struct nullary_wrapper +{ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().operator()(op,i,j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().operator()(op,i); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().template packetOp(op,i,j); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { + return nullary_wrapper >::value, + has_unary_operator >::value, + has_binary_operator >::value>().template packetOp(op,i); + } +}; +#endif // MSVC workaround + +template +struct evaluator> + : evaluator_base> { + typedef CwiseNullaryOp XprType; + typedef internal::remove_all_t PlainObjectTypeCleaned; + + enum { + CoeffReadCost = internal::functor_traits::Cost, + + Flags = (evaluator::Flags & + (HereditaryBits | (functor_has_linear_access::ret ? LinearAccessBit : 0) | + (functor_traits::PacketAccess ? PacketAccessBit : 0))) | + (functor_traits::IsRepeatable ? 0 : EvalBeforeNestingBit), + Alignment = AlignedMax + }; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n) : m_functor(n.functor()), m_wrapper() { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType row, IndexType col) const { + return m_wrapper(m_functor, row, col); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType index) const { + return m_wrapper(m_functor, index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(IndexType row, IndexType col) const { + return m_wrapper.template packetOp(m_functor, row, col); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(IndexType index) const { + return m_wrapper.template packetOp(m_functor, index); + } + + protected: + const NullaryOp m_functor; + const internal::nullary_wrapper m_wrapper; +}; + +// -------------------- CwiseUnaryOp -------------------- + +template +struct unary_evaluator, IndexBased> : evaluator_base> { + typedef CwiseUnaryOp XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + Flags = evaluator::Flags & + (HereditaryBits | LinearAccessBit | (functor_traits::PacketAccess ? 
PacketAccessBit : 0)), + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& op) : m_d(op) { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_d.func()(m_d.argImpl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_d.func()(m_d.argImpl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return m_d.func().packetOp(m_d.argImpl.template packet(row, col)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return m_d.func().packetOp(m_d.argImpl.template packet(index)); + } + + protected: + // this helper permits to completely eliminate the functor if it is empty + struct Data { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) + : op(xpr.functor()), argImpl(xpr.nestedExpression()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& func() const { return op; } + UnaryOp op; + evaluator argImpl; + }; + + Data m_d; +}; + +// ----------------------- Casting --------------------- + +template +struct unary_evaluator, ArgType>, IndexBased> { + using CastOp = core_cast_op; + using XprType = CwiseUnaryOp; + + // Use the largest packet type by default + using SrcPacketType = typename packet_traits::type; + static constexpr int SrcPacketSize = unpacket_traits::size; + static constexpr int SrcPacketBytes = SrcPacketSize * sizeof(SrcType); + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + PacketAccess = functor_traits::PacketAccess, + ActualPacketAccessBit = PacketAccess ? 
PacketAccessBit : 0, + Flags = evaluator::Flags & (HereditaryBits | LinearAccessBit | ActualPacketAccessBit), + IsRowMajor = (evaluator::Flags & RowMajorBit), + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& xpr) + : m_argImpl(xpr.nestedExpression()), m_rows(xpr.rows()), m_cols(xpr.cols()) { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + template + using AltSrcScalarOp = std::enable_if_t<(unpacket_traits::size < SrcPacketSize && + !find_packet_by_size::size>::value), + bool>; + template + using SrcPacketArgs1 = + std::enable_if_t<(find_packet_by_size::size>::value), bool>; + template + using SrcPacketArgs2 = std::enable_if_t<(unpacket_traits::size) == (2 * SrcPacketSize), bool>; + template + using SrcPacketArgs4 = std::enable_if_t<(unpacket_traits::size) == (4 * SrcPacketSize), bool>; + template + using SrcPacketArgs8 = std::enable_if_t<(unpacket_traits::size) == (8 * SrcPacketSize), bool>; + + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index, Index col, Index packetSize) const { + return col + packetSize <= cols(); + } + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index row, Index, Index packetSize) const { + return row + packetSize <= rows(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_array_bounds(Index index, Index packetSize) const { + return index + packetSize <= size(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SrcType srcCoeff(Index row, Index col, Index offset) const { + Index actualRow = IsRowMajor ? row : row + offset; + Index actualCol = IsRowMajor ? col + offset : col; + return m_argImpl.coeff(actualRow, actualCol); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SrcType srcCoeff(Index index, Index offset) const { + Index actualIndex = index + offset; + return m_argImpl.coeff(actualIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstType coeff(Index row, Index col) const { + return cast(srcCoeff(row, col, 0)); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstType coeff(Index index) const { + return cast(srcCoeff(index, 0)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType srcPacket(Index row, Index col, Index offset) const { + constexpr int PacketSize = unpacket_traits::size; + Index actualRow = IsRowMajor ? row : row + (offset * PacketSize); + Index actualCol = IsRowMajor ? col + (offset * PacketSize) : col; + eigen_assert(check_array_bounds(actualRow, actualCol, PacketSize) && "Array index out of bounds"); + return m_argImpl.template packet(actualRow, actualCol); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType srcPacket(Index index, Index offset) const { + constexpr int PacketSize = unpacket_traits::size; + Index actualIndex = index + (offset * PacketSize); + eigen_assert(check_array_bounds(actualIndex, PacketSize) && "Array index out of bounds"); + return m_argImpl.template packet(actualIndex); + } + + // There is no source packet type with equal or fewer elements than DstPacketType. + // This is problematic as the evaluation loop may attempt to access data outside the bounds of the array. + // For example, consider the cast utilizing pcast with an array of size 4: {0.0f,1.0f,2.0f,3.0f}. + // The first iteration of the evaluation loop will load 16 bytes: {0.0f,1.0f,2.0f,3.0f} and cast to {0.0,1.0}, which + // is acceptable. 
The second iteration will load 16 bytes: {2.0f,3.0f,?,?}, which is outside the bounds of the array. + + // Instead, perform runtime check to determine if the load would access data outside the bounds of the array. + // If not, perform full load. Otherwise, revert to a scalar loop to perform a partial load. + // In either case, perform a vectorized cast of the source packet. + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const { + constexpr int DstPacketSize = unpacket_traits::size; + constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType); + constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode); + SrcPacketType src; + if (EIGEN_PREDICT_TRUE(check_array_bounds(row, col, SrcPacketSize))) { + src = srcPacket(row, col, 0); + } else { + Array srcArray; + for (size_t k = 0; k < DstPacketSize; k++) srcArray[k] = srcCoeff(row, col, k); + for (size_t k = DstPacketSize; k < SrcPacketSize; k++) srcArray[k] = SrcType(0); + src = pload(srcArray.data()); + } + return pcast(src); + } + // Use the source packet type with the same size as DstPacketType, if it exists + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const { + constexpr int DstPacketSize = unpacket_traits::size; + using SizedSrcPacketType = typename find_packet_by_size::type; + constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType); + constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode); + return pcast(srcPacket(row, col, 0)); + } + // unpacket_traits::size == 2 * SrcPacketSize + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const { + constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode); + return pcast(srcPacket(row, col, 0), + srcPacket(row, col, 1)); + } + // unpacket_traits::size == 4 * SrcPacketSize + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const { + constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode); + return pcast(srcPacket(row, col, 0), srcPacket(row, col, 1), + srcPacket(row, col, 2), + srcPacket(row, col, 3)); + } + // unpacket_traits::size == 8 * SrcPacketSize + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index row, Index col) const { + constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode); + return pcast( + srcPacket(row, col, 0), srcPacket(row, col, 1), srcPacket(row, col, 2), + srcPacket(row, col, 3), srcPacket(row, col, 4), srcPacket(row, col, 5), + srcPacket(row, col, 6), srcPacket(row, col, 7)); + } + + // Analogous routines for linear access. 
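The partial-load logic above is what keeps widening casts from reading past the end of a buffer; on the user side it is reached through the public cast<T>() method, for example:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // float -> double doubles the element size, so one destination packet
  // covers only part of a source packet; for the tail of an odd-sized
  // vector the evaluator above falls back to scalar loads instead of
  // loading a full source packet that would overrun the array.
  Eigen::VectorXf f = Eigen::VectorXf::LinSpaced(5, 0.0f, 4.0f);
  Eigen::VectorXd d = f.cast<double>();
  std::cout << d.transpose() << "\n";
}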
+ template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const { + constexpr int DstPacketSize = unpacket_traits::size; + constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType); + constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode); + SrcPacketType src; + if (EIGEN_PREDICT_TRUE(check_array_bounds(index, SrcPacketSize))) { + src = srcPacket(index, 0); + } else { + Array srcArray; + for (size_t k = 0; k < DstPacketSize; k++) srcArray[k] = srcCoeff(index, k); + for (size_t k = DstPacketSize; k < SrcPacketSize; k++) srcArray[k] = SrcType(0); + src = pload(srcArray.data()); + } + return pcast(src); + } + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const { + constexpr int DstPacketSize = unpacket_traits::size; + using SizedSrcPacketType = typename find_packet_by_size::type; + constexpr int SrcBytesIncrement = DstPacketSize * sizeof(SrcType); + constexpr int SrcLoadMode = plain_enum_min(SrcBytesIncrement, LoadMode); + return pcast(srcPacket(index, 0)); + } + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const { + constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode); + return pcast(srcPacket(index, 0), srcPacket(index, 1)); + } + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const { + constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode); + return pcast(srcPacket(index, 0), srcPacket(index, 1), + srcPacket(index, 2), srcPacket(index, 3)); + } + template = true> + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DstPacketType packet(Index index) const { + constexpr int SrcLoadMode = plain_enum_min(SrcPacketBytes, LoadMode); + return pcast(srcPacket(index, 0), srcPacket(index, 1), + srcPacket(index, 2), srcPacket(index, 3), + srcPacket(index, 4), srcPacket(index, 5), + srcPacket(index, 6), srcPacket(index, 7)); + } + + constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_rows; } + constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_cols; } + constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_rows * m_cols; } + + protected: + const evaluator m_argImpl; + const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + +// -------------------- CwiseTernaryOp -------------------- + +// this is a ternary expression +template +struct evaluator> + : public ternary_evaluator> { + typedef CwiseTernaryOp XprType; + typedef ternary_evaluator> Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} +}; + +template +struct ternary_evaluator, IndexBased, IndexBased> + : evaluator_base> { + typedef CwiseTernaryOp XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + + int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + Arg1Flags = evaluator::Flags, + Arg2Flags = evaluator::Flags, + Arg3Flags = evaluator::Flags, + SameType = is_same::value && + is_same::value, + StorageOrdersAgree = (int(Arg1Flags) & RowMajorBit) == (int(Arg2Flags) & RowMajorBit) && + (int(Arg1Flags) & RowMajorBit) == (int(Arg3Flags) & RowMajorBit), + Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & + (HereditaryBits | + (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) & + ((StorageOrdersAgree ? LinearAccessBit : 0) | + (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0)))), + Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit), + Alignment = plain_enum_min(plain_enum_min(evaluator::Alignment, evaluator::Alignment), + evaluator::Alignment) + }; + + EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr) { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return m_d.func().packetOp(m_d.arg1Impl.template packet(row, col), + m_d.arg2Impl.template packet(row, col), + m_d.arg3Impl.template packet(row, col)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return m_d.func().packetOp(m_d.arg1Impl.template packet(index), + m_d.arg2Impl.template packet(index), + m_d.arg3Impl.template packet(index)); + } + + protected: + // this helper permits to completely eliminate the functor if it is empty + struct Data { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) + : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TernaryOp& func() const { return op; } + TernaryOp op; + evaluator arg1Impl; + evaluator arg2Impl; + evaluator arg3Impl; + }; + + Data m_d; +}; + +// specialization for expressions like (a < b).select(c, d) to enable full vectorization +template +struct evaluator, Arg1, Arg2, + CwiseBinaryOp, CmpLhsType, CmpRhsType>>> + : public ternary_evaluator< + CwiseTernaryOp, Arg1, Arg2, + CwiseBinaryOp, CmpLhsType, CmpRhsType>>> { + using DummyTernaryOp = scalar_boolean_select_op; + using DummyArg3 = CwiseBinaryOp, CmpLhsType, CmpRhsType>; + using DummyXprType = CwiseTernaryOp; + + using TernaryOp = scalar_boolean_select_op; + using Arg3 = CwiseBinaryOp, CmpLhsType, CmpRhsType>; + using XprType = CwiseTernaryOp; + + using Base = ternary_evaluator; + + EIGEN_DEVICE_FUNC explicit evaluator(const DummyXprType& xpr) + : Base(XprType(xpr.arg1(), xpr.arg2(), Arg3(xpr.arg3().lhs(), xpr.arg3().rhs()))) {} +}; + +// -------------------- CwiseBinaryOp -------------------- + +// this is a binary expression +template +struct evaluator> : public binary_evaluator> { + typedef CwiseBinaryOp XprType; + typedef binary_evaluator> Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {} +}; + +template +struct binary_evaluator, IndexBased, IndexBased> + : evaluator_base> { + typedef CwiseBinaryOp XprType; + + enum { + CoeffReadCost = + int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + LhsFlags = evaluator::Flags, + RhsFlags = evaluator::Flags, + SameType = is_same::value, + StorageOrdersAgree = (int(LhsFlags) & RowMajorBit) == (int(RhsFlags) & RowMajorBit), + Flags0 = (int(LhsFlags) | int(RhsFlags)) & + (HereditaryBits | + (int(LhsFlags) & int(RhsFlags) & + ((StorageOrdersAgree ? LinearAccessBit : 0) | + (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0)))), + Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), + Alignment = plain_enum_min(evaluator::Alignment, evaluator::Alignment) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit binary_evaluator(const XprType& xpr) : m_d(xpr) { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return m_d.func().packetOp(m_d.lhsImpl.template packet(row, col), + m_d.rhsImpl.template packet(row, col)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return m_d.func().packetOp(m_d.lhsImpl.template packet(index), + m_d.rhsImpl.template packet(index)); + } + + protected: + // this helper permits to completely eliminate the functor if it is empty + struct Data { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) + : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& func() const { return op; } + BinaryOp op; + evaluator lhsImpl; + evaluator rhsImpl; + }; + + Data m_d; +}; + +// -------------------- CwiseUnaryView -------------------- + +template +struct unary_evaluator, IndexBased> + : evaluator_base> { + typedef CwiseUnaryView XprType; + + enum { + CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), + + Flags = (evaluator::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)), + + Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost... + }; + + EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op) { + EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_d.func()(m_d.argImpl.coeff(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_d.func()(m_d.argImpl.coeff(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { + return m_d.func()(m_d.argImpl.coeffRef(row, col)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return m_d.func()(m_d.argImpl.coeffRef(index)); + } + + protected: + // this helper permits to completely eliminate the functor if it is empty + struct Data { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) + : op(xpr.functor()), argImpl(xpr.nestedExpression()) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& func() const { return op; } + UnaryOp op; + evaluator argImpl; + }; + + Data m_d; +}; + +// -------------------- Map -------------------- + +// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ? 
+// but that might complicate template specialization +template +struct mapbase_evaluator; + +template +struct mapbase_evaluator : evaluator_base { + typedef Derived XprType; + typedef typename XprType::PointerType PointerType; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + IsRowMajor = XprType::RowsAtCompileTime, + ColsAtCompileTime = XprType::ColsAtCompileTime, + CoeffReadCost = NumTraits::ReadCost + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit mapbase_evaluator(const XprType& map) + : m_data(const_cast(map.data())), + m_innerStride(map.innerStride()), + m_outerStride(map.outerStride()) { + EIGEN_STATIC_ASSERT(check_implication((evaluator::Flags & PacketAccessBit) != 0, + internal::inner_stride_at_compile_time::ret == 1), + PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_data[col * colStride() + row * rowStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_data[index * m_innerStride.value()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { + return m_data[col * colStride() + row * rowStride()]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_data[index * m_innerStride.value()]; } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + PointerType ptr = m_data + row * rowStride() + col * colStride(); + return internal::ploadt(ptr); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return internal::ploadt(m_data + index * m_innerStride.value()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { + PointerType ptr = m_data + row * rowStride() + col * colStride(); + return internal::pstoret(ptr, x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { + internal::pstoret(m_data + index * m_innerStride.value(), x); + } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowStride() const EIGEN_NOEXCEPT { + return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colStride() const EIGEN_NOEXCEPT { + return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); + } + + PointerType m_data; + const internal::variable_if_dynamic m_innerStride; + const internal::variable_if_dynamic m_outerStride; +}; + +template +struct evaluator> + : public mapbase_evaluator, PlainObjectType> { + typedef Map XprType; + typedef typename XprType::Scalar Scalar; + // TODO: should check for smaller packet types once we can handle multi-sized packet types + typedef typename packet_traits::type PacketScalar; + + enum { + InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 + ? int(PlainObjectType::InnerStrideAtCompileTime) + : int(StrideType::InnerStrideAtCompileTime), + OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 + ? 
int(PlainObjectType::OuterStrideAtCompileTime) + : int(StrideType::OuterStrideAtCompileTime), + HasNoInnerStride = InnerStrideAtCompileTime == 1, + HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, + HasNoStride = HasNoInnerStride && HasNoOuterStride, + IsDynamicSize = PlainObjectType::SizeAtCompileTime == Dynamic, + + PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit), + LinearAccessMask = + bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit), + Flags = int(evaluator::Flags) & (LinearAccessMask & PacketAccessMask), + + Alignment = int(MapOptions) & int(AlignedMask) + }; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map) : mapbase_evaluator(map) {} +}; + +// -------------------- Ref -------------------- + +template +struct evaluator> + : public mapbase_evaluator, PlainObjectType> { + typedef Ref XprType; + + enum { + Flags = evaluator>::Flags, + Alignment = evaluator>::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& ref) + : mapbase_evaluator(ref) {} +}; + +// -------------------- Block -------------------- + +template ::ret> +struct block_evaluator; + +template +struct evaluator> + : block_evaluator { + typedef Block XprType; + typedef typename XprType::Scalar Scalar; + // TODO: should check for smaller packet types once we can handle multi-sized packet types + typedef typename packet_traits::type PacketScalar; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime, + + ArgTypeIsRowMajor = (int(evaluator::Flags) & RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? 1 + : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0 + : ArgTypeIsRowMajor, + HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor), + InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), + OuterStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0, + + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || + (InnerPanel && (evaluator::Flags & LinearAccessBit))) + ? LinearAccessBit + : 0, + FlagsRowMajorBit = XprType::Flags & RowMajorBit, + Flags0 = evaluator::Flags & ((HereditaryBits & ~RowMajorBit) | DirectAccessBit | MaskPacketAccessBit), + Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit, + + PacketAlignment = unpacket_traits::alignment, + Alignment0 = (InnerPanel && (OuterStrideAtCompileTime != Dynamic) && (OuterStrideAtCompileTime != 0) && + (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) + ? 
int(PacketAlignment) + : 0, + Alignment = plain_enum_min(evaluator::Alignment, Alignment0) + }; + typedef block_evaluator block_evaluator_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& block) : block_evaluator_type(block) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } +}; + +// no direct-access => dispatch to a unary evaluator +template +struct block_evaluator + : unary_evaluator> { + typedef Block XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit block_evaluator(const XprType& block) + : unary_evaluator(block) {} +}; + +template +struct unary_evaluator, IndexBased> + : evaluator_base> { + typedef Block XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& block) + : m_argImpl(block.nestedExpression()), + m_startRow(block.startRow()), + m_startCol(block.startCol()), + m_linear_offset(ForwardLinearAccess + ? (ArgType::IsRowMajor + ? block.startRow() * block.nestedExpression().cols() + block.startCol() + : block.startCol() * block.nestedExpression().rows() + block.startRow()) + : 0) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + RowsAtCompileTime = XprType::RowsAtCompileTime, + ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor) == int(ArgType::IsRowMajor)) && + bool(evaluator::Flags & LinearAccessBit) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return linear_coeff_impl(index, bool_constant()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { + return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return linear_coeffRef_impl(index, bool_constant()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return m_argImpl.template packet(m_startRow.value() + row, m_startCol.value() + col); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + if (ForwardLinearAccess) + return m_argImpl.template packet(m_linear_offset.value() + index); + else + return packet(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { + return m_argImpl.template writePacket(m_startRow.value() + row, m_startCol.value() + col, x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { + if (ForwardLinearAccess) + return m_argImpl.template writePacket(m_linear_offset.value() + index, x); + else + return writePacket(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0, + x); + } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType + linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const { + return m_argImpl.coeff(m_linear_offset.value() + index); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType + linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const { + return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? 
index : 0); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& linear_coeffRef_impl(Index index, + internal::true_type /* ForwardLinearAccess */) { + return m_argImpl.coeffRef(m_linear_offset.value() + index); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& linear_coeffRef_impl( + Index index, internal::false_type /* not ForwardLinearAccess */) { + return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); + } + + evaluator m_argImpl; + const variable_if_dynamic m_startRow; + const variable_if_dynamic m_startCol; + const variable_if_dynamic m_linear_offset; +}; + +// TODO: This evaluator does not actually use the child evaluator; +// all action is via the data() as returned by the Block expression. + +template +struct block_evaluator + : mapbase_evaluator, + typename Block::PlainObject> { + typedef Block XprType; + typedef typename XprType::Scalar Scalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit block_evaluator(const XprType& block) + : mapbase_evaluator(block) { + eigen_internal_assert((internal::is_constant_evaluated() || + (std::uintptr_t(block.data()) % plain_enum_max(1, evaluator::Alignment)) == 0) && + "data is not aligned"); + } +}; + +// -------------------- Select -------------------- +// NOTE shall we introduce a ternary_evaluator? + +// TODO enable vectorization for Select +template +struct evaluator> + : evaluator_base> { + typedef Select XprType; + enum { + CoeffReadCost = evaluator::CoeffReadCost + + plain_enum_max(evaluator::CoeffReadCost, evaluator::CoeffReadCost), + + Flags = (unsigned int)evaluator::Flags & evaluator::Flags & HereditaryBits, + + Alignment = plain_enum_min(evaluator::Alignment, evaluator::Alignment) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& select) + : m_conditionImpl(select.conditionMatrix()), m_thenImpl(select.thenMatrix()), m_elseImpl(select.elseMatrix()) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + if (m_conditionImpl.coeff(row, col)) + return m_thenImpl.coeff(row, col); + else + return m_elseImpl.coeff(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + if (m_conditionImpl.coeff(index)) + return m_thenImpl.coeff(index); + else + return m_elseImpl.coeff(index); + } + + protected: + evaluator m_conditionImpl; + evaluator m_thenImpl; + evaluator m_elseImpl; +}; + +// -------------------- Replicate -------------------- + +template +struct unary_evaluator> + : evaluator_base> { + typedef Replicate XprType; + typedef typename XprType::CoeffReturnType CoeffReturnType; + enum { Factor = (RowFactor == Dynamic || ColFactor == Dynamic) ? Dynamic : RowFactor * ColFactor }; + typedef typename internal::nested_eval::type ArgTypeNested; + typedef internal::remove_all_t ArgTypeNestedCleaned; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + LinearAccessMask = XprType::IsVectorAtCompileTime ? 
LinearAccessBit : 0, + Flags = (evaluator::Flags & (HereditaryBits | LinearAccessMask) & ~RowMajorBit) | + (traits::Flags & RowMajorBit), + + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& replicate) + : m_arg(replicate.nestedExpression()), + m_argImpl(m_arg), + m_rows(replicate.nestedExpression().rows()), + m_cols(replicate.nestedExpression().cols()) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + // try to avoid using modulo; this is a pure optimization strategy + const Index actual_row = internal::traits::RowsAtCompileTime == 1 ? 0 + : RowFactor == 1 ? row + : row % m_rows.value(); + const Index actual_col = internal::traits::ColsAtCompileTime == 1 ? 0 + : ColFactor == 1 ? col + : col % m_cols.value(); + + return m_argImpl.coeff(actual_row, actual_col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + // try to avoid using modulo; this is a pure optimization strategy + const Index actual_index = internal::traits::RowsAtCompileTime == 1 + ? (ColFactor == 1 ? index : index % m_cols.value()) + : (RowFactor == 1 ? index : index % m_rows.value()); + + return m_argImpl.coeff(actual_index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + const Index actual_row = internal::traits::RowsAtCompileTime == 1 ? 0 + : RowFactor == 1 ? row + : row % m_rows.value(); + const Index actual_col = internal::traits::ColsAtCompileTime == 1 ? 0 + : ColFactor == 1 ? col + : col % m_cols.value(); + + return m_argImpl.template packet(actual_row, actual_col); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + const Index actual_index = internal::traits::RowsAtCompileTime == 1 + ? (ColFactor == 1 ? index : index % m_cols.value()) + : (RowFactor == 1 ? index : index % m_rows.value()); + + return m_argImpl.template packet(actual_index); + } + + protected: + const ArgTypeNested m_arg; + evaluator m_argImpl; + const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + +// -------------------- MatrixWrapper and ArrayWrapper -------------------- +// +// evaluator_wrapper_base is a common base class for the +// MatrixWrapper and ArrayWrapper evaluators. 
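+// (MatrixWrapper is what ArrayBase::matrix() returns and ArrayWrapper is what
+// MatrixBase::array() returns; the common evaluator below simply forwards every
+// coeff/packet access to the evaluator of the wrapped expression.)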
+ +template +struct evaluator_wrapper_base : evaluator_base { + typedef remove_all_t ArgType; + enum { + CoeffReadCost = evaluator::CoeffReadCost, + Flags = evaluator::Flags, + Alignment = evaluator::Alignment + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} + + typedef typename ArgType::Scalar Scalar; + typedef typename ArgType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_argImpl.coeff(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(row, col); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return m_argImpl.template packet(row, col); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return m_argImpl.template packet(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { + m_argImpl.template writePacket(row, col, x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { + m_argImpl.template writePacket(index, x); + } + + protected: + evaluator m_argImpl; +}; + +template +struct unary_evaluator> : evaluator_wrapper_base> { + typedef MatrixWrapper XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& wrapper) + : evaluator_wrapper_base>(wrapper.nestedExpression()) {} +}; + +template +struct unary_evaluator> : evaluator_wrapper_base> { + typedef ArrayWrapper XprType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& wrapper) + : evaluator_wrapper_base>(wrapper.nestedExpression()) {} +}; + +// -------------------- Reverse -------------------- + +// defined in Reverse.h: +template +struct reverse_packet_cond; + +template +struct unary_evaluator> : evaluator_base> { + typedef Reverse XprType; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + enum { + IsRowMajor = XprType::IsRowMajor, + IsColMajor = !IsRowMajor, + ReverseRow = (Direction == Vertical) || (Direction == BothDirections), + ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), + ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || + ((Direction == Horizontal) && IsRowMajor), + + CoeffReadCost = evaluator::CoeffReadCost, + + // let's enable LinearAccess only with vectorization because of the product overhead + // FIXME enable DirectAccess with negative strides? + Flags0 = evaluator::Flags, + LinearAccess = + ((Direction == BothDirections) && (int(Flags0) & PacketAccessBit)) || + ((ReverseRow && XprType::ColsAtCompileTime == 1) || (ReverseCol && XprType::RowsAtCompileTime == 1)) + ? LinearAccessBit + : 0, + + Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess), + + Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f. + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& reverse) + : m_argImpl(reverse.nestedExpression()), + m_rows(ReverseRow ? 
reverse.nestedExpression().rows() : 1), + m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { + return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + enum { + PacketSize = unpacket_traits::size, + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 + }; + typedef internal::reverse_packet_cond reverse_packet; + return reverse_packet::run(m_argImpl.template packet( + ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + enum { PacketSize = unpacket_traits::size }; + return preverse( + m_argImpl.template packet(m_rows.value() * m_cols.value() - index - PacketSize)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { + // FIXME we could factorize some code with packet(i,j) + enum { + PacketSize = unpacket_traits::size, + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 + }; + typedef internal::reverse_packet_cond reverse_packet; + m_argImpl.template writePacket(ReverseRow ? m_rows.value() - row - OffsetRow : row, + ReverseCol ? m_cols.value() - col - OffsetCol : col, + reverse_packet::run(x)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { + enum { PacketSize = unpacket_traits::size }; + m_argImpl.template writePacket(m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); + } + + protected: + evaluator m_argImpl; + + // If we do not reverse rows, then we do not need to know the number of rows; same for columns + // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. 
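+  // For instance, reversing a row vector of size n yields m_rows == 1 and
+  // m_cols == n, so coeff(index) above reads entry 1 * n - index - 1 == n - index - 1.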
+ const variable_if_dynamic m_rows; + const variable_if_dynamic m_cols; +}; + +// -------------------- Diagonal -------------------- + +template +struct evaluator> : evaluator_base> { + typedef Diagonal XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + + Flags = + (unsigned int)(evaluator::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit, + + Alignment = 0 + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& diagonal) + : m_argImpl(diagonal.nestedExpression()), m_index(diagonal.index()) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index) const { + return m_argImpl.coeff(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_argImpl.coeff(index + rowOffset(), index + colOffset()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index) { + return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); + } + + protected: + evaluator m_argImpl; + const internal::variable_if_dynamicindex m_index; + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowOffset() const { + return m_index.value() > 0 ? 0 : -m_index.value(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colOffset() const { + return m_index.value() > 0 ? m_index.value() : 0; + } +}; + +//---------------------------------------------------------------------- +// deprecated code +//---------------------------------------------------------------------- + +// -------------------- EvalToTemp -------------------- + +// expression class for evaluating nested expression to a temporary + +template +class EvalToTemp; + +template +struct traits> : public traits {}; + +template +class EvalToTemp : public dense_xpr_base>::type { + public: + typedef typename dense_xpr_base::type Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) + + explicit EvalToTemp(const ArgType& arg) : m_arg(arg) {} + + const ArgType& arg() const { return m_arg; } + + EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_arg.rows(); } + + EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_arg.cols(); } + + private: + const ArgType& m_arg; +}; + +template +struct evaluator> : public evaluator { + typedef EvalToTemp XprType; + typedef typename ArgType::PlainObject PlainObject; + typedef evaluator Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.arg()) { + internal::construct_at(this, m_result); + } + + // This constructor is used when nesting an EvalTo evaluator in another evaluator + EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) : m_result(arg) { internal::construct_at(this, m_result); } + + protected: + PlainObject m_result; +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COREEVALUATORS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CoreIterators.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CoreIterators.h new file mode 100644 index 0000000000000000000000000000000000000000..f62cf238e75da7894ef263b7d58962073aca1dd4 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CoreIterators.h @@ -0,0 +1,141 @@ +// This file is part of Eigen, a 
lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COREITERATORS_H +#define EIGEN_COREITERATORS_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core + */ + +namespace internal { + +template +class inner_iterator_selector; + +} + +/** \class InnerIterator + * \brief An InnerIterator allows to loop over the element of any matrix expression. + * + * \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is + * constructed. + * + * TODO: add a usage example + */ +template +class InnerIterator { + protected: + typedef internal::inner_iterator_selector::Kind> IteratorType; + typedef internal::evaluator EvaluatorType; + typedef typename internal::traits::Scalar Scalar; + + public: + /** Construct an iterator over the \a outerId -th row or column of \a xpr */ + InnerIterator(const XprType &xpr, const Index &outerId) : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) {} + + /// \returns the value of the current coefficient. + EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); } + /** Increment the iterator \c *this to the next non-zero coefficient. + * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView + */ + EIGEN_STRONG_INLINE InnerIterator &operator++() { + m_iter.operator++(); + return *this; + } + EIGEN_STRONG_INLINE InnerIterator &operator+=(Index i) { + m_iter.operator+=(i); + return *this; + } + EIGEN_STRONG_INLINE InnerIterator operator+(Index i) { + InnerIterator result(*this); + result += i; + return result; + } + + /// \returns the column or row index of the current coefficient. + EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } + /// \returns the row index of the current coefficient. + EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } + /// \returns the column index of the current coefficient. + EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } + /// \returns \c true if the iterator \c *this still references a valid coefficient. + EIGEN_STRONG_INLINE operator bool() const { return m_iter; } + + protected: + EvaluatorType m_eval; + IteratorType m_iter; + + private: + // If you get here, then you're not using the right InnerIterator type, e.g.: + // SparseMatrix A; + // SparseMatrix::InnerIterator it(A,0); + template + InnerIterator(const EigenBase &, Index outer); +}; + +namespace internal { + +// Generic inner iterator implementation for dense objects +template +class inner_iterator_selector { + protected: + typedef evaluator EvaluatorType; + typedef typename traits::Scalar Scalar; + enum { IsRowMajor = (XprType::Flags & RowMajorBit) == RowMajorBit }; + + public: + EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize) + : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) {} + + EIGEN_STRONG_INLINE Scalar value() const { + return (IsRowMajor) ? 
m_eval.coeff(m_outer, m_inner) : m_eval.coeff(m_inner, m_outer); + } + + EIGEN_STRONG_INLINE inner_iterator_selector &operator++() { + m_inner++; + return *this; + } + + EIGEN_STRONG_INLINE Index index() const { return m_inner; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } + + EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner >= 0; } + + protected: + const EvaluatorType &m_eval; + Index m_inner; + const Index m_outer; + const Index m_end; +}; + +// For iterator-based evaluator, inner-iterator is already implemented as +// evaluator<>::InnerIterator +template +class inner_iterator_selector : public evaluator::InnerIterator { + protected: + typedef typename evaluator::InnerIterator Base; + typedef evaluator EvaluatorType; + + public: + EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, + const Index & /*innerSize*/) + : Base(eval, outerId) {} +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COREITERATORS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h new file mode 100644 index 0000000000000000000000000000000000000000..aa79b60813d0e00672979f6e824bf261eb1f835d --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseBinaryOp.h @@ -0,0 +1,166 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_BINARY_OP_H +#define EIGEN_CWISE_BINARY_OP_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits> { + // we must not inherit from traits since it has + // the potential to cause problems with MSVC + typedef remove_all_t Ancestor; + typedef typename traits::XprKind XprKind; + enum { + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime + }; + + // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor), + // we still want to handle the case when the result type is different. 
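+  // (e.g., applying a comparison functor to two float expressions yields a
+  // bool-valued expression, so Scalar is deduced from the functor below)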
+ typedef typename result_of::type Scalar; + typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, + BinaryOp>::ret StorageKind; + typedef typename promote_index_type::StorageIndex, typename traits::StorageIndex>::type + StorageIndex; + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef std::remove_reference_t LhsNested_; + typedef std::remove_reference_t RhsNested_; + enum { + Flags = cwise_promote_storage_order::StorageKind, typename traits::StorageKind, + LhsNested_::Flags & RowMajorBit, RhsNested_::Flags & RowMajorBit>::value + }; +}; +} // end namespace internal + +template +class CwiseBinaryOpImpl; + +/** \class CwiseBinaryOp + * \ingroup Core_Module + * + * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions + * + * \tparam BinaryOp template functor implementing the operator + * \tparam LhsType the type of the left-hand side + * \tparam RhsType the type of the right-hand side + * + * This class represents an expression where a coefficient-wise binary operator is applied to two expressions. + * It is the return type of binary operators, by which we mean only those binary operators where + * both the left-hand side and the right-hand side are Eigen expressions. + * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp. + * + * Most of the time, this is the only way that it is used, so you typically don't have to name + * CwiseBinaryOp types explicitly. + * + * \sa MatrixBase::binaryExpr(const MatrixBase &,const CustomBinaryOp &) const, class CwiseUnaryOp, class + * CwiseNullaryOp + */ +template +class CwiseBinaryOp : public CwiseBinaryOpImpl::StorageKind, + typename internal::traits::StorageKind, BinaryOp>::ret>, + internal::no_assignment_operator { + public: + typedef internal::remove_all_t Functor; + typedef internal::remove_all_t Lhs; + typedef internal::remove_all_t Rhs; + + typedef typename CwiseBinaryOpImpl< + BinaryOp, LhsType, RhsType, + typename internal::cwise_promote_storage_type::StorageKind, + typename internal::traits::StorageKind, BinaryOp>::ret>::Base + Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) + + EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp, typename Lhs::Scalar, typename Rhs::Scalar) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) + + typedef typename internal::ref_selector::type LhsNested; + typedef typename internal::ref_selector::type RhsNested; + typedef std::remove_reference_t LhsNested_; + typedef std::remove_reference_t RhsNested_; + +#if EIGEN_COMP_MSVC + // Required for Visual Studio or the Copy constructor will probably not get inlined! + EIGEN_STRONG_INLINE CwiseBinaryOp(const CwiseBinaryOp&) = default; +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, + const BinaryOp& func = BinaryOp()) + : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) { + eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { + // return the fixed size type if available to enable compile time optimizations + return internal::traits>::RowsAtCompileTime == Dynamic ? m_rhs.rows() + : m_lhs.rows(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { + // return the fixed size type if available to enable compile time optimizations + return internal::traits>::ColsAtCompileTime == Dynamic ? 
m_rhs.cols() + : m_lhs.cols(); + } + + /** \returns the left hand side nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNested_& lhs() const { return m_lhs; } + /** \returns the right hand side nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNested_& rhs() const { return m_rhs; } + /** \returns the functor representing the binary operation */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; } + + protected: + LhsNested m_lhs; + RhsNested m_rhs; + const BinaryOp m_functor; +}; + +// Generic API dispatcher +template +class CwiseBinaryOpImpl : public internal::generic_xpr_base>::type { + public: + typedef typename internal::generic_xpr_base>::type Base; +}; + +/** replaces \c *this by \c *this - \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator-=(const MatrixBase& other) { + call_assignment(derived(), other.derived(), internal::sub_assign_op()); + return derived(); +} + +/** replaces \c *this by \c *this + \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator+=(const MatrixBase& other) { + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_CWISE_BINARY_OP_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h new file mode 100644 index 0000000000000000000000000000000000000000..aa139bf3dd3e1d20c773adde327e6ccc6a355cf9 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseNullaryOp.h @@ -0,0 +1,977 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_NULLARY_OP_H +#define EIGEN_CWISE_NULLARY_OP_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits > : traits { + enum { Flags = traits::Flags & RowMajorBit }; +}; + +} // namespace internal + +/** \class CwiseNullaryOp + * \ingroup Core_Module + * + * \brief Generic expression of a matrix where all coefficients are defined by a functor + * + * \tparam NullaryOp template functor implementing the operator + * \tparam PlainObjectType the underlying plain matrix/array type + * + * This class represents an expression of a generic nullary operator. + * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods, + * and most of the time this is the only way it is used. + * + * However, if you want to write a function returning such an expression, you + * will need to use this class. + * + * The functor NullaryOp must expose one of the following method: + + +
+    - \c operator()() if the procedural generation does not depend on the coefficient entries (e.g., random numbers)
+    - \c operator()(Index i) if the procedural generation makes sense for vectors only and depends on the coefficient index \c i (e.g., linspace)
+    - \c operator()(Index i, Index j) if the procedural generation depends on the matrix coordinates \c i, \c j (e.g., to generate a checkerboard with 0 and 1)
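+ *
+ * For instance, a functor generating the checkerboard mentioned above could be
+ * written as follows (a minimal sketch, not part of Eigen itself):
+ * \code
+ * struct checkerboard_op {
+ *   double operator()(Eigen::Index i, Eigen::Index j) const { return double((i + j) % 2); }
+ * };
+ * // Eigen::MatrixXd board = Eigen::MatrixXd::NullaryExpr(8, 8, checkerboard_op());
+ * \endcode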
+ * It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized + for vectors. + * + * See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding + * C++11 random number generators. + * + * A nullary expression can also be used to implement custom sophisticated matrix manipulations + * that cannot be covered by the existing set of natively supported matrix manipulations. + * See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations + * on the behavior of CwiseNullaryOp. + * + * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr + */ +template +class CwiseNullaryOp : public internal::dense_xpr_base >::type, + internal::no_assignment_operator { + public: + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) + + EIGEN_DEVICE_FUNC CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) + : m_rows(rows), m_cols(cols), m_functor(func) { + eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && + (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + } + EIGEN_DEVICE_FUNC CwiseNullaryOp(Index size, const NullaryOp& func = NullaryOp()) + : CwiseNullaryOp(RowsAtCompileTime == 1 ? 1 : size, RowsAtCompileTime == 1 ? size : 1, func) { + EIGEN_STATIC_ASSERT(CwiseNullaryOp::IsVectorAtCompileTime, YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const { return m_cols.value(); } + + /** \returns the functor representing the nullary operation */ + EIGEN_DEVICE_FUNC const NullaryOp& functor() const { return m_functor; } + + protected: + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; + const NullaryOp m_functor; +}; + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +#ifndef EIGEN_PARSED_BY_DOXYGEN + const CwiseNullaryOp::PlainObject> +#else + const CwiseNullaryOp +#endif + DenseBase::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) { + return CwiseNullaryOp(rows, cols, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. 
+ * + * Here is an example with C++11 random generators: \include random_cpp11.cpp + * Output: \verbinclude random_cpp11.out + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +#ifndef EIGEN_PARSED_BY_DOXYGEN + const CwiseNullaryOp::PlainObject> +#else + const CwiseNullaryOp +#endif + DenseBase::NullaryExpr(Index size, const CustomNullaryOp& func) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + if (RowsAtCompileTime == 1) + return CwiseNullaryOp(1, size, func); + else + return CwiseNullaryOp(size, 1, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * This variant is only for fixed-size DenseBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +#ifndef EIGEN_PARSED_BY_DOXYGEN + const CwiseNullaryOp::PlainObject> +#else + const CwiseNullaryOp +#endif + DenseBase::NullaryExpr(const CustomNullaryOp& func) { + return CwiseNullaryOp(RowsAtCompileTime, ColsAtCompileTime, func); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this DenseBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(Index rows, Index cols, const Scalar& value) { + return DenseBase::NullaryExpr(rows, cols, internal::scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this DenseBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(Index size, const Scalar& value) { + return DenseBase::NullaryExpr(size, internal::scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * This variant is only for fixed-size DenseBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(const Scalar& value) { + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, + internal::scalar_constant_op(value)); +} + +/** \deprecated because of accuracy loss. 
In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&) + * + * \only_for_vectors + * + * Example: \include DenseBase_LinSpaced_seq_deprecated.cpp + * Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out + * + * \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&) + */ +template +EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase< + Derived>::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return DenseBase::NullaryExpr(size, internal::linspaced_op(low, high, size)); +} + +/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&) + * + * \sa LinSpaced(const Scalar&, const Scalar&) + */ +template +EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase< + Derived>::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, + internal::linspaced_op(low, high, Derived::SizeAtCompileTime)); +} + +/** + * \brief Sets a linearly spaced vector. + * + * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * Example: \include DenseBase_LinSpaced.cpp + * Output: \verbinclude DenseBase_LinSpaced.out + * + * For integer scalar types, an even spacing is possible if and only if the length of the range, + * i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the + * number of values \c high-low+1 (meaning each value can be repeated the same number of time). + * If one of these two considions is not satisfied, then \c high is lowered to the largest value + * satisfying one of this constraint. + * Here are some examples: + * + * Example: \include DenseBase_LinSpacedInt.cpp + * Output: \verbinclude DenseBase_LinSpacedInt.out + * + * \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(Index size, const Scalar& low, const Scalar& high) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return DenseBase::NullaryExpr(size, internal::linspaced_op(low, high, size)); +} + +/** + * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&) + * Special version for fixed size types which does not require the size parameter. 
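+ *
+ * A minimal illustration (a sketch using a fixed-size vector):
+ * \code
+ * Eigen::Vector4f v = Eigen::Vector4f::LinSpaced(0.f, 1.f); // v is (0, 1/3, 2/3, 1)
+ * \endcode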
+ */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(const Scalar& low, const Scalar& high) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, + internal::linspaced_op(low, high, Derived::SizeAtCompileTime)); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessEqualSpacedReturnType +DenseBase::EqualSpaced(Index size, const Scalar& low, const Scalar& step) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return DenseBase::NullaryExpr(size, internal::equalspaced_op(low, step)); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessEqualSpacedReturnType +DenseBase::EqualSpaced(const Scalar& low, const Scalar& step) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::equalspaced_op(low, step)); +} + +/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isApproxToConstant(const Scalar& val, const RealScalar& prec) const { + typename internal::nested_eval::type self(derived()); + for (Index j = 0; j < cols(); ++j) + for (Index i = 0; i < rows(); ++i) + if (!internal::isApprox(self.coeff(i, j), val, prec)) return false; + return true; +} + +/** This is just an alias for isApproxToConstant(). + * + * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isConstant(const Scalar& val, const RealScalar& prec) const { + return isApproxToConstant(val, prec); +} + +/** Alias for setConstant(): sets all coefficients in this expression to \a val. + * + * \sa setConstant(), Constant(), class CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& val) { + setConstant(val); +} + +/** Sets all coefficients in this expression to value \a val. + * + * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), + * Constant(), class CwiseNullaryOp, setZero(), setOnes() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& val) { + internal::eigen_fill_impl::run(derived(), val); + return derived(); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val. + * + * \only_for_vectors + * + * Example: \include Matrix_setConstant_int.cpp + * Output: \verbinclude Matrix_setConstant_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, + * MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index size, const Scalar& val) { + resize(size); + return setConstant(val); +} + +/** Resizes to the given size, and sets all coefficients in this expression to the given value \a val. 
+ * + * \param rows the new number of rows + * \param cols the new number of columns + * \param val the value to which all coefficients are set + * + * Example: \include Matrix_setConstant_int_int.cpp + * Output: \verbinclude Matrix_setConstant_int_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, + * MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index rows, Index cols, + const Scalar& val) { + resize(rows, cols); + return setConstant(val); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to the given value \a val. For the parameter + * of type NoChange_t, just pass the special value \c NoChange. + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, + * MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(NoChange_t, Index cols, + const Scalar& val) { + return setConstant(rows(), cols, val); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to the given value \a val. For the parameter + * of type NoChange_t, just pass the special value \c NoChange. + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, + * MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index rows, NoChange_t, + const Scalar& val) { + return setConstant(rows, cols(), val); +} + +/** + * \brief Sets a linearly spaced vector. + * + * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * Example: \include DenseBase_setLinSpaced.cpp + * Output: \verbinclude DenseBase_setLinSpaced.out + * + * For integer scalar types, do not miss the explanations on the definition + * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. + * + * \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index newSize, const Scalar& low, + const Scalar& high) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op(low, high, newSize)); +} + +/** + * \brief Sets a linearly spaced vector. + * + * The function fills \c *this with equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * For integer scalar types, do not miss the explanations on the definition + * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. 
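+ *
+ * A minimal illustration (a sketch):
+ * \code
+ * Eigen::VectorXf v(5);
+ * v.setLinSpaced(0.f, 1.f); // v becomes (0, 0.25, 0.5, 0.75, 1)
+ * \endcode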
+ * + * \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return setLinSpaced(size(), low, high); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setEqualSpaced(Index newSize, const Scalar& low, + const Scalar& step) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return derived() = Derived::NullaryExpr(newSize, internal::equalspaced_op(low, step)); +} +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setEqualSpaced(const Scalar& low, + const Scalar& step) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return setEqualSpaced(size(), low, step); +} + +// zero: + +/** \returns an expression of a zero matrix. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * Example: \include MatrixBase_zero_int_int.cpp + * Output: \verbinclude MatrixBase_zero_int_int.out + * + * \sa Zero(), Zero(Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ZeroReturnType DenseBase::Zero( + Index rows, Index cols) { + return ZeroReturnType(rows, cols); +} + +/** \returns an expression of a zero vector. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * Example: \include MatrixBase_zero_int.cpp + * Output: \verbinclude MatrixBase_zero_int.out + * + * \sa Zero(), Zero(Index,Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ZeroReturnType DenseBase::Zero( + Index size) { + return ZeroReturnType(size); +} + +/** \returns an expression of a fixed-size zero matrix or vector. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_zero.cpp + * Output: \verbinclude MatrixBase_zero.out + * + * \sa Zero(Index), Zero(Index,Index) + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ZeroReturnType DenseBase::Zero() { + return ZeroReturnType(RowsAtCompileTime, ColsAtCompileTime); +} + +/** \returns true if *this is approximately equal to the zero matrix, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isZero.cpp + * Output: \verbinclude MatrixBase_isZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isZero(const RealScalar& prec) const { + typename internal::nested_eval::type self(derived()); + for (Index j = 0; j < cols(); ++j) + for (Index i = 0; i < rows(); ++i) + if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast(1), prec)) return false; + return true; +} + +/** Sets all coefficients in this expression to zero. 
+ * + * Example: \include MatrixBase_setZero.cpp + * Output: \verbinclude MatrixBase_setZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setZero() { + internal::eigen_zero_impl::run(derived()); + return derived(); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to zero. + * + * \only_for_vectors + * + * Example: \include Matrix_setZero_int.cpp + * Output: \verbinclude Matrix_setZero_int.out + * + * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index newSize) { + resize(newSize); + return setZero(); +} + +/** Resizes to the given size, and sets all coefficients in this expression to zero. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setZero_int_int.cpp + * Output: \verbinclude Matrix_setZero_int_int.out + * + * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index rows, Index cols) { + resize(rows, cols); + return setZero(); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to zero. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. + * + * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(Index, NoChange_t), class CwiseNullaryOp, + * DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(NoChange_t, Index cols) { + return setZero(rows(), cols); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to zero. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. + * + * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(NoChange_t, Index), class CwiseNullaryOp, + * DenseBase::Zero() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index rows, NoChange_t) { + return setZero(rows, cols()); +} + +// ones: + +/** \returns an expression of a matrix where all coefficients equal one. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used + * instead. + * + * Example: \include MatrixBase_ones_int_int.cpp + * Output: \verbinclude MatrixBase_ones_int_int.out + * + * \sa Ones(), Ones(Index), isOnes(), class Ones + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones( + Index rows, Index cols) { + return Constant(rows, cols, Scalar(1)); +} + +/** \returns an expression of a vector where all coefficients equal one. + * + * The parameter \a newSize is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Ones() should be used + * instead. 
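+ *
+ * For instance (an inline sketch, complementing the snippet below; it assumes
+ * only the API documented here):
+ * \code
+ * Eigen::VectorXf v = Eigen::VectorXf::Ones(4);  // dynamic size: [1 1 1 1]
+ * Eigen::Vector3f w = Eigen::Vector3f::Ones();   // fixed size: no argument
+ * \endcode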
+ * + * Example: \include MatrixBase_ones_int.cpp + * Output: \verbinclude MatrixBase_ones_int.out + * + * \sa Ones(), Ones(Index,Index), isOnes(), class Ones + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones( + Index newSize) { + return Constant(newSize, Scalar(1)); +} + +/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_ones.cpp + * Output: \verbinclude MatrixBase_ones.out + * + * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones() { + return Constant(Scalar(1)); +} + +/** \returns true if *this is approximately equal to the matrix where all coefficients + * are equal to 1, within the precision given by \a prec. + * + * Example: \include MatrixBase_isOnes.cpp + * Output: \verbinclude MatrixBase_isOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isOnes(const RealScalar& prec) const { + return isApproxToConstant(Scalar(1), prec); +} + +/** Sets all coefficients in this expression to one. + * + * Example: \include MatrixBase_setOnes.cpp + * Output: \verbinclude MatrixBase_setOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() { + return setConstant(Scalar(1)); +} + +/** Resizes to the given \a newSize, and sets all coefficients in this expression to one. + * + * \only_for_vectors + * + * Example: \include Matrix_setOnes_int.cpp + * Output: \verbinclude Matrix_setOnes_int.out + * + * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index newSize) { + resize(newSize); + return setConstant(Scalar(1)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to one. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setOnes_int_int.cpp + * Output: \verbinclude Matrix_setOnes_int_int.out + * + * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index rows, Index cols) { + resize(rows, cols); + return setConstant(Scalar(1)); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to one. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. + * + * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp, + * MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index rows, NoChange_t) { + return setOnes(rows, cols()); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to one. For the parameter of type NoChange_t, + * just pass the special value \c NoChange. 
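+ *
+ * For instance (an inline sketch; it assumes only the API documented here):
+ * \code
+ * Eigen::MatrixXd m(2, 2);
+ * m.setOnes(Eigen::NoChange, 5);  // m is now 2 x 5, filled with ones
+ * m.setOnes(3, Eigen::NoChange);  // m is now 3 x 5, filled with ones
+ * \endcode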
+ * + * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t) class CwiseNullaryOp, + * MatrixBase::Ones() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(NoChange_t, Index cols) { + return setOnes(rows(), cols); +} + +// Identity: + +/** \returns an expression of the identity matrix (not necessarily square). + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used + * instead. + * + * Example: \include MatrixBase_identity_int_int.cpp + * Output: \verbinclude MatrixBase_identity_int_int.out + * + * \sa Identity(), setIdentity(), isIdentity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity(Index rows, Index cols) { + return DenseBase::NullaryExpr(rows, cols, internal::scalar_identity_op()); +} + +/** \returns an expression of the identity matrix (not necessarily square). + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variant taking size arguments. + * + * Example: \include MatrixBase_identity.cpp + * Output: \verbinclude MatrixBase_identity.out + * + * \sa Identity(Index,Index), setIdentity(), isIdentity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity() { + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op()); +} + +/** \returns true if *this is approximately equal to the identity matrix + * (not necessarily square), + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isIdentity.cpp + * Output: \verbinclude MatrixBase_isIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() + */ +template +bool MatrixBase::isIdentity(const RealScalar& prec) const { + typename internal::nested_eval::type self(derived()); + for (Index j = 0; j < cols(); ++j) { + for (Index i = 0; i < rows(); ++i) { + if (i == j) { + if (!internal::isApprox(self.coeff(i, j), static_cast(1), prec)) return false; + } else { + if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast(1), prec)) return false; + } + } + } + return true; +} + +namespace internal { + +template = 16)> +struct setIdentity_impl { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { + return m = Derived::Identity(m.rows(), m.cols()); + } +}; + +template +struct setIdentity_impl { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { + m.setZero(); + const Index size = numext::mini(m.rows(), m.cols()); + for (Index i = 0; i < size; ++i) m.coeffRef(i, i) = typename Derived::Scalar(1); + return m; + } +}; + +} // end namespace internal + +/** Writes the identity expression (not necessarily square) into *this. 
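+ *
+ * For instance (an inline sketch, complementing the snippet below; it assumes
+ * only the API documented here):
+ * \code
+ * Eigen::MatrixXd m(2, 3);
+ * m.setIdentity();         // ones on the main diagonal, zeros elsewhere
+ * assert(m.isIdentity());  // also holds for non-square matrices
+ * \endcode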
+ * + * Example: \include MatrixBase_setIdentity.cpp + * Output: \verbinclude MatrixBase_setIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() { + return internal::setIdentity_impl::run(derived()); +} + +/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setIdentity_int_int.cpp + * Output: \verbinclude Matrix_setIdentity_int_int.out + * + * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index cols) { + derived().resize(rows, cols); + return setIdentity(); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit( + Index newSize, Index i) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(newSize, newSize), i); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * This variant is for fixed-size vector only. + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit( + Index i) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(), i); +} + +/** \returns an expression of the X axis unit vector (1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), + * MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() { + return Derived::Unit(0); +} + +/** \returns an expression of the Y axis unit vector (0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), + * MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() { + return Derived::Unit(1); +} + +/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), + * MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() { + return Derived::Unit(2); +} + +/** \returns an expression of the W axis unit vector (0,0,0,1) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), + * MatrixBase::UnitW() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() { + return Derived::Unit(3); +} + +/** \brief Set the coefficients of \c *this to the i-th unit (basis) vector + * + * \param i index of the unique coefficient to be set to 1 + * + * \only_for_vectors + * + 
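+ *
+ * For instance (an inline sketch; it assumes only the API documented here):
+ * \code
+ * Eigen::Vector4d v;
+ * v.setUnit(2);     // v = (0, 0, 1, 0)
+ * Eigen::VectorXd w;
+ * w.setUnit(6, 0);  // resizing overload below: w = e_0 in R^6
+ * \endcode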
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index i) {
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+  eigen_assert(i < size());
+  derived().setZero();
+  derived().coeffRef(i) = Scalar(1);
+  return derived();
+}
+
+/** \brief Resizes to the given \a newSize, and writes the i-th unit (basis) vector into *this.
+ *
+ * \param newSize the new size of the vector
+ * \param i index of the unique coefficient to be set to 1
+ *
+ * \only_for_vectors
+ *
+ * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index newSize, Index i) {
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+  eigen_assert(i < newSize);
+  derived().resize(newSize);
+  return setUnit(i);
+}
+
+}  // end namespace Eigen
+
+#endif  // EIGEN_CWISE_NULLARY_OP_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseTernaryOp.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseTernaryOp.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bb0d4075c8374704b36ea5b18f81b0ebfd9e9dc
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseTernaryOp.h
@@ -0,0 +1,171 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_TERNARY_OP_H
+#define EIGEN_CWISE_TERNARY_OP_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
+struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > {
+  // we must not inherit from traits<Arg1> since it has
+  // the potential to cause problems with MSVC
+  typedef remove_all_t<Arg1> Ancestor;
+  typedef typename traits<Ancestor>::XprKind XprKind;
+  enum {
+    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
+    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
+    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
+  };
+
+  // even though we require Arg1, Arg2, and Arg3 to have the same scalar type
+  // (see CwiseTernaryOp constructor),
+  // we still want to handle the case when the result type is different.
+  typedef typename result_of<TernaryOp(typename Arg1::Scalar, typename Arg2::Scalar, typename Arg3::Scalar)>::type
+      Scalar;
+
+  typedef typename internal::traits<Arg1>::StorageKind StorageKind;
+  typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;
+
+  typedef typename Arg1::Nested Arg1Nested;
+  typedef typename Arg2::Nested Arg2Nested;
+  typedef typename Arg3::Nested Arg3Nested;
+  typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
+  typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
+  typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;
+  enum { Flags = Arg1Nested_::Flags & RowMajorBit };
+};
+}  // end namespace internal
+
+template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
+class CwiseTernaryOpImpl;
+
+/** \class CwiseTernaryOp
+ * \ingroup Core_Module
+ *
+ * \brief Generic expression where a coefficient-wise ternary operator is
+ * applied to three expressions
+ *
+ * \tparam TernaryOp template functor implementing the operator
+ * \tparam Arg1Type the type of the first argument
+ * \tparam Arg2Type the type of the second argument
+ * \tparam Arg3Type the type of the third argument
+ *
+ * This class represents an expression where a coefficient-wise ternary
+ * operator is applied to three expressions.
+ * It is the return type of ternary operators, by which we mean only those
+ * ternary operators where
+ * all three arguments are Eigen expressions.
+ * For example, the return type of betainc(matrix1, matrix2, matrix3) is a
+ * CwiseTernaryOp.
+ *
+ * Most of the time, this is the only way that it is used, so you typically
+ * don't have to name
+ * CwiseTernaryOp types explicitly.
+ *
+ * \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const
+ * MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,
+ * class CwiseUnaryOp, class CwiseNullaryOp
+ */
+template <typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type>
+class CwiseTernaryOp : public CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
+                                                 typename internal::traits<Arg1Type>::StorageKind>,
+                       internal::no_assignment_operator {
+ public:
+  typedef internal::remove_all_t<Arg1Type> Arg1;
+  typedef internal::remove_all_t<Arg2Type> Arg2;
+  typedef internal::remove_all_t<Arg3Type> Arg3;
+
+  // require the sizes to match
+  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)
+  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)
+
+  // The index types should match
+  EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
+                                         typename internal::traits<Arg2Type>::StorageKind>::value),
+                      STORAGE_KIND_MUST_MATCH)
+  EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
+                                         typename internal::traits<Arg3Type>::StorageKind>::value),
+                      STORAGE_KIND_MUST_MATCH)
+
+  typedef typename CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
+                                      typename internal::traits<Arg1Type>::StorageKind>::Base Base;
+  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)
+
+  typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;
+  typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;
+  typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;
+  typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
+  typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
+  typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3,
+                                                       const TernaryOp& func = TernaryOp())
+      : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {
+    eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() && a1.cols() == a3.cols());
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const {
+    // return the fixed size type if available to enable compile time
+    // optimizations
+    if (internal::traits<internal::remove_all_t<Arg1Nested> >::RowsAtCompileTime == Dynamic &&
+        internal::traits<internal::remove_all_t<Arg2Nested> >::RowsAtCompileTime == Dynamic)
+      return m_arg3.rows();
+    else if (internal::traits<internal::remove_all_t<Arg1Nested> >::RowsAtCompileTime == Dynamic &&
+             internal::traits<internal::remove_all_t<Arg3Nested> >::RowsAtCompileTime == Dynamic)
+      return m_arg2.rows();
+    else
+      return m_arg1.rows();
+  }
+
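+  // Illustrative aside (a sketch, assuming the DenseBase::select() API declared
+  // elsewhere in this port): a typical expression evaluating as a CwiseTernaryOp is
+  //   Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(5, -2.0, 2.0);
+  //   Eigen::ArrayXd r = (x > 0.0).select(x, 0.0);  // coefficient-wise ReLU
+  // just like the betainc() example mentioned in the class documentation above.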
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const {
+    // return the fixed size type if available to enable compile time
+    // optimizations
+    if (internal::traits<internal::remove_all_t<Arg1Nested> >::ColsAtCompileTime == Dynamic &&
+        internal::traits<internal::remove_all_t<Arg2Nested> >::ColsAtCompileTime == Dynamic)
+      return m_arg3.cols();
+    else if (internal::traits<internal::remove_all_t<Arg1Nested> >::ColsAtCompileTime == Dynamic &&
+             internal::traits<internal::remove_all_t<Arg3Nested> >::ColsAtCompileTime == Dynamic)
+      return m_arg2.cols();
+    else
+      return m_arg1.cols();
+  }
+
+  /** \returns the first argument nested expression */
+  EIGEN_DEVICE_FUNC const Arg1Nested_& arg1() const { return m_arg1; }
+  /** \returns the second argument nested expression */
+  EIGEN_DEVICE_FUNC const Arg2Nested_& arg2() const { return m_arg2; }
+  /** \returns the third argument nested expression */
+  EIGEN_DEVICE_FUNC const Arg3Nested_& arg3() const { return m_arg3; }
+  /** \returns the functor representing the ternary operation */
+  EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; }
+
+ protected:
+  Arg1Nested m_arg1;
+  Arg2Nested m_arg2;
+  Arg3Nested m_arg3;
+  const TernaryOp m_functor;
+};
+
+// Generic API dispatcher
+template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
+class CwiseTernaryOpImpl : public internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type {
+ public:
+  typedef typename internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type Base;
+};
+
+}  // end namespace Eigen
+
+#endif  // EIGEN_CWISE_TERNARY_OP_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h
new file mode 100644
index 0000000000000000000000000000000000000000..42ed459a0f015dadc2fe2f0ce118916e5f4a4000
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseUnaryOp.h
@@ -0,0 +1,91 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_UNARY_OP_H
+#define EIGEN_CWISE_UNARY_OP_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+template <typename UnaryOp, typename XprType>
+struct traits<CwiseUnaryOp<UnaryOp, XprType> > : traits<XprType> {
+  typedef typename result_of<UnaryOp(const typename XprType::Scalar&)>::type Scalar;
+  typedef typename XprType::Nested XprTypeNested;
+  typedef std::remove_reference_t<XprTypeNested> XprTypeNested_;
+  enum { Flags = XprTypeNested_::Flags & RowMajorBit };
+};
+}  // namespace internal
+
+template <typename UnaryOp, typename XprType, typename StorageKind>
+class CwiseUnaryOpImpl;
+
+/** \class CwiseUnaryOp
+ * \ingroup Core_Module
+ *
+ * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
+ *
+ * \tparam UnaryOp template functor implementing the operator
+ * \tparam XprType the type of the expression to which we are applying the unary operator
+ *
+ * This class represents an expression where a unary operator is applied to an expression.
+ * It is the return type of all operations taking exactly 1 input expression, regardless of the
+ * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
+ * is considered unary, because only the right-hand side is an expression, and its
+ * return type is a specialization of CwiseUnaryOp.
+ *
+ * Most of the time, this is the only way that it is used, so you typically don't have to name
+ * CwiseUnaryOp types explicitly.
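+ *
+ * For instance (an inline sketch; it assumes only the unaryExpr() API referenced
+ * below):
+ * \code
+ * Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 2);
+ * auto expr = m.unaryExpr([](double x) { return x * x; });  // a CwiseUnaryOp
+ * Eigen::MatrixXd squared = expr;  // evaluated coefficient-wise on assignment
+ * \endcode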
+ * + * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp + */ +template +class CwiseUnaryOp : public CwiseUnaryOpImpl::StorageKind>, + internal::no_assignment_operator { + public: + typedef typename CwiseUnaryOpImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) + typedef typename internal::ref_selector::type XprTypeNested; + typedef internal::remove_all_t NestedExpression; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.cols(); } + + /** \returns the functor representing the unary operation */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t& nestedExpression() const { + return m_xpr; + } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::remove_all_t& nestedExpression() { return m_xpr; } + + protected: + XprTypeNested m_xpr; + const UnaryOp m_functor; +}; + +// Generic API dispatcher +template +class CwiseUnaryOpImpl : public internal::generic_xpr_base >::type { + public: + typedef typename internal::generic_xpr_base >::type Base; +}; + +} // end namespace Eigen + +#endif // EIGEN_CWISE_UNARY_OP_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseUnaryView.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseUnaryView.h new file mode 100644 index 0000000000000000000000000000000000000000..49b14101115e83df86b35e7ba15321d5bcd9cad7 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/CwiseUnaryView.h @@ -0,0 +1,167 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_UNARY_VIEW_H +#define EIGEN_CWISE_UNARY_VIEW_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits > : traits { + typedef typename result_of::Scalar&)>::type1 ScalarRef; + static_assert(std::is_reference::value, "Views must return a reference type."); + typedef remove_all_t Scalar; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef remove_all_t MatrixTypeNested_; + enum { + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + Flags = + traits::Flags & + (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions + MatrixTypeInnerStride = inner_stride_at_compile_time::ret, + // need to cast the sizeof's from size_t to int explicitly, otherwise: + // "error: no integral type can represent all of the enumerator values + InnerStrideAtCompileTime = + StrideType::InnerStrideAtCompileTime == 0 + ? (MatrixTypeInnerStride == Dynamic + ? 
int(Dynamic) + : int(MatrixTypeInnerStride) * int(sizeof(typename traits::Scalar) / sizeof(Scalar))) + : int(StrideType::InnerStrideAtCompileTime), + + OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 + ? (outer_stride_at_compile_time::ret == Dynamic + ? int(Dynamic) + : outer_stride_at_compile_time::ret * + int(sizeof(typename traits::Scalar) / sizeof(Scalar))) + : int(StrideType::OuterStrideAtCompileTime) + }; +}; + +// Generic API dispatcher +template ::value> +class CwiseUnaryViewImpl : public generic_xpr_base >::type { + public: + typedef typename generic_xpr_base >::type Base; +}; + +template +class CwiseUnaryViewImpl + : public dense_xpr_base >::type { + public: + typedef CwiseUnaryView Derived; + typedef typename dense_xpr_base >::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl) + + EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeffRef(0)); } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { + return StrideType::InnerStrideAtCompileTime != 0 ? int(StrideType::InnerStrideAtCompileTime) + : derived().nestedExpression().innerStride() * + sizeof(typename traits::Scalar) / sizeof(Scalar); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { + return StrideType::OuterStrideAtCompileTime != 0 ? int(StrideType::OuterStrideAtCompileTime) + : derived().nestedExpression().outerStride() * + sizeof(typename traits::Scalar) / sizeof(Scalar); + } + + protected: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl) + + // Allow const access to coeffRef for the case of direct access being enabled. + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { + return internal::evaluator(derived()).coeffRef(index); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index col) const { + return internal::evaluator(derived()).coeffRef(row, col); + } +}; + +template +class CwiseUnaryViewImpl + : public CwiseUnaryViewImpl { + public: + typedef CwiseUnaryViewImpl Base; + typedef CwiseUnaryView Derived; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl) + + using Base::data; + EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { + return internal::evaluator(derived()).coeffRef(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return internal::evaluator(derived()).coeffRef(index); + } + + protected: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl) +}; + +} // namespace internal + +/** \class CwiseUnaryView + * \ingroup Core_Module + * + * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector + * + * \tparam ViewOp template functor implementing the view + * \tparam MatrixType the type of the matrix we are applying the unary operator + * + * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector. + * It is the return type of real() and imag(), and most of the time this is the only way it is used. 
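+ *
+ * For instance (an inline sketch; it assumes only the real()/imag() views
+ * mentioned above):
+ * \code
+ * Eigen::Matrix2cf m = Eigen::Matrix2cf::Random();
+ * m.imag().setZero();             // writes through the lvalue view
+ * Eigen::Matrix2f re = m.real();  // reads the real parts only
+ * \endcode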
+ * + * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp + */ +template +class CwiseUnaryView : public internal::CwiseUnaryViewImpl::StorageKind> { + public: + typedef typename internal::CwiseUnaryViewImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef internal::remove_all_t NestedExpression; + + explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp()) + : m_matrix(mat), m_functor(func) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + /** \returns the functor representing unary operation */ + EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC const internal::remove_all_t& nestedExpression() const { return m_matrix; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC std::remove_reference_t& nestedExpression() { return m_matrix; } + + protected: + MatrixTypeNested m_matrix; + ViewOp m_functor; +}; + +} // namespace Eigen + +#endif // EIGEN_CWISE_UNARY_VIEW_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseBase.h new file mode 100644 index 0000000000000000000000000000000000000000..d5906bdb73fd2acefb861556700f50bf6d4d9d0a --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseBase.h @@ -0,0 +1,661 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSEBASE_H +#define EIGEN_DENSEBASE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type. +EIGEN_STATIC_ASSERT(NumTraits::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE) + +/** \class DenseBase + * \ingroup Core_Module + * + * \brief Base class for all dense matrices, vectors, and arrays + * + * This class is the base that is inherited by all dense objects (matrix, vector, arrays, + * and related expression types). The common Eigen API for dense objects is contained in this class. + * + * \tparam Derived is the derived type, e.g., a matrix type or an expression. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. + * + * \sa \blank \ref TopicClassHierarchy + */ +template +class DenseBase +#ifndef EIGEN_PARSED_BY_DOXYGEN + : public DenseCoeffsBase::value> +#else + : public DenseCoeffsBase +#endif // not EIGEN_PARSED_BY_DOXYGEN +{ + public: + /** Inner iterator type to iterate over the coefficients of a row or column. 
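+ *
+ * For instance (an inline sketch; it assumes the dense InnerIterator from
+ * CoreIterators.h):
+ * \code
+ * Eigen::MatrixXd m = Eigen::MatrixXd::Identity(3, 3);
+ * for (Eigen::MatrixXd::InnerIterator it(m, 0); it; ++it)  // inner vector 0
+ *   std::cout << it.row() << "," << it.col() << ": " << it.value() << "\n";
+ * \endcode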
+ * \sa class InnerIterator + */ + typedef Eigen::InnerIterator InnerIterator; + + typedef typename internal::traits::StorageKind StorageKind; + + /** + * \brief The type used to store indices + * \details This typedef is relevant for types that store multiple indices such as + * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index + * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. + */ + typedef typename internal::traits::StorageIndex StorageIndex; + + /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. */ + typedef typename internal::traits::Scalar Scalar; + + /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. + * + * It is an alias for the Scalar type */ + typedef Scalar value_type; + + typedef typename NumTraits::Real RealScalar; + typedef DenseCoeffsBase::value> Base; + + using Base::coeff; + using Base::coeffByOuterInner; + using Base::colIndexByOuterInner; + using Base::cols; + using Base::const_cast_derived; + using Base::derived; + using Base::rowIndexByOuterInner; + using Base::rows; + using Base::size; + using Base::operator(); + using Base::operator[]; + using Base::colStride; + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::stride; + using Base::w; + using Base::x; + using Base::y; + using Base::z; + typedef typename Base::CoeffReturnType CoeffReturnType; + + enum { + + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + /**< The number of rows at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + /**< The number of columns at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + SizeAtCompileTime = (internal::size_of_xpr_at_compile_time::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + /**< This value is equal to the maximum possible number of rows that this expression + * might have. If this expression might have an arbitrarily high number of rows, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + /**< This value is equal to the maximum possible number of columns that this expression + * might have. If this expression might have an arbitrarily high number of columns, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. 
+ * + * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxSizeAtCompileTime = internal::size_at_compile_time(internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime), + /**< This value is equal to the maximum possible number of coefficients that this expression + * might have. If this expression might have an arbitrarily high number of coefficients, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime + */ + + IsVectorAtCompileTime = + internal::traits::RowsAtCompileTime == 1 || internal::traits::ColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 + : bool(IsVectorAtCompileTime) ? 1 + : 2, + /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors, + * and 2 for matrices. + */ + + Flags = internal::traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ + + InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) + : int(IsRowMajor) ? int(ColsAtCompileTime) + : int(RowsAtCompileTime), + + InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, + OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret + }; + + typedef typename internal::find_best_packet::type PacketScalar; + + enum { IsPlainObjectBase = 0 }; + + /** The plain matrix type corresponding to this expression. + * \sa PlainObject */ + typedef Matrix::Scalar, internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags & RowMajorBit ? RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime> + PlainMatrix; + + /** The plain array type corresponding to this expression. + * \sa PlainObject */ + typedef Array::Scalar, internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags & RowMajorBit ? RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime> + PlainArray; + + /** \brief The plain matrix or array type corresponding to this expression. + * + * This is not necessarily exactly the return type of eval(). In the case of plain matrices, + * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed + * that the return type of eval() is either PlainObject or const PlainObject&. + */ + typedef std::conditional_t::XprKind, MatrixXpr>::value, + PlainMatrix, PlainArray> + PlainObject; + + /** \returns the outer size. + * + * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a + * column-major matrix, and the number of rows for a row-major matrix. 
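+ *
+ * For instance (an inline sketch; it assumes only the API documented here):
+ * \code
+ * Eigen::MatrixXd m(2, 5);  // column-major by default
+ * assert(m.outerSize() == 5 && m.innerSize() == 2);
+ * \endcode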
*/ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const { + return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols(); + } + + /** \returns the inner size. + * + * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a + * column-major matrix, and the number of columns for a row-major matrix. */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const { + return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows(); + } + + /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are + * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and + * does nothing else. + */ + EIGEN_DEVICE_FUNC void resize(Index newSize) { + EIGEN_ONLY_USED_FOR_DEBUG(newSize); + eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize."); + } + /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are + * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and + * does nothing else. + */ + EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { + EIGEN_ONLY_USED_FOR_DEBUG(rows); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + eigen_assert(rows == this->rows() && cols == this->cols() && + "DenseBase::resize() does not actually allow to resize."); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp, PlainObject> ConstantReturnType; + /** \internal Represents a matrix with all coefficients equal to zero*/ + typedef CwiseNullaryOp, PlainObject> ZeroReturnType; + /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */ + EIGEN_DEPRECATED typedef CwiseNullaryOp, PlainObject> SequentialLinSpacedReturnType; + /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ + typedef CwiseNullaryOp, PlainObject> RandomAccessLinSpacedReturnType; + /** \internal Represents a vector with equally spaced coefficients that allows random access. */ + typedef CwiseNullaryOp, PlainObject> RandomAccessEqualSpacedReturnType; + /** \internal the return type of MatrixBase::eigenvalues() */ + typedef Matrix::Scalar>::Real, + internal::traits::ColsAtCompileTime, 1> + EigenvaluesReturnType; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** Copies \a other into *this. \returns a reference to *this. */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); + + template + EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue& func); + + /** \internal + * Copies \a other into *this without evaluating other. \returns a reference to *this. 
*/ + template + /** \deprecated */ + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase& other); + + EIGEN_DEVICE_FUNC CommaInitializer operator<<(const Scalar& s); + + template + /** \deprecated it now returns \c *this */ + EIGEN_DEPRECATED const Derived& flagged() const { + return derived(); + } + + template + EIGEN_DEVICE_FUNC CommaInitializer operator<<(const DenseBase& other); + + typedef Transpose TransposeReturnType; + EIGEN_DEVICE_FUNC TransposeReturnType transpose(); + typedef Transpose ConstTransposeReturnType; + EIGEN_DEVICE_FUNC const ConstTransposeReturnType transpose() const; + EIGEN_DEVICE_FUNC void transposeInPlace(); + + EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); + EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value); + EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value); + + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, Index size, + const Scalar& low, + const Scalar& high); + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, + const Scalar& low, + const Scalar& high); + + EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low, + const Scalar& high); + EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high); + + EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(Index size, const Scalar& low, + const Scalar& step); + EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(const Scalar& low, const Scalar& step); + + template + EIGEN_DEVICE_FUNC static const CwiseNullaryOp NullaryExpr(Index rows, Index cols, + const CustomNullaryOp& func); + template + EIGEN_DEVICE_FUNC static const CwiseNullaryOp NullaryExpr(Index size, + const CustomNullaryOp& func); + template + EIGEN_DEVICE_FUNC static const CwiseNullaryOp NullaryExpr(const CustomNullaryOp& func); + + EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(Index rows, Index cols); + EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(Index size); + EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(); + EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols); + EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size); + EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(); + + EIGEN_DEVICE_FUNC void fill(const Scalar& value); + EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value); + EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); + EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high); + EIGEN_DEVICE_FUNC Derived& setEqualSpaced(Index size, const Scalar& low, const Scalar& step); + EIGEN_DEVICE_FUNC Derived& setEqualSpaced(const Scalar& low, const Scalar& step); + EIGEN_DEVICE_FUNC Derived& setZero(); + EIGEN_DEVICE_FUNC Derived& setOnes(); + EIGEN_DEVICE_FUNC Derived& setRandom(); + + template + EIGEN_DEVICE_FUNC bool isApprox(const DenseBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const RealScalar& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + template + EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const DenseBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + + EIGEN_DEVICE_FUNC bool 
isApproxToConstant(const Scalar& value, + const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, + const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits::dummy_precision()) const; + + EIGEN_DEVICE_FUNC inline bool hasNaN() const; + EIGEN_DEVICE_FUNC inline bool allFinite() const; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other); + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other); + + typedef internal::add_const_on_value_type_t::type> EvalReturnType; + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. + * + * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page + * \endlink. + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const { + // Even though MSVC does not honor strong inlining when the return type + // is a dynamic matrix, we desperately need strong inlining for fixed + // size types on MSVC. + return typename internal::eval::type(derived()); + } + + /** swaps *this with the expression \a other. + * + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(const DenseBase& other) { + EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + eigen_assert(rows() == other.rows() && cols() == other.cols()); + call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); + } + + /** swaps *this with the matrix or array \a other. + * + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(PlainObjectBase& other) { + eigen_assert(rows() == other.rows() && cols() == other.cols()); + call_assignment(derived(), other.derived(), internal::swap_assign_op()); + } + + EIGEN_DEVICE_FUNC inline const NestByValue nestByValue() const; + EIGEN_DEVICE_FUNC inline const ForceAlignedAccess forceAlignedAccess() const; + EIGEN_DEVICE_FUNC inline ForceAlignedAccess forceAlignedAccess(); + template + EIGEN_DEVICE_FUNC inline const std::conditional_t, Derived&> + forceAlignedAccessIf() const; + template + EIGEN_DEVICE_FUNC inline std::conditional_t, Derived&> forceAlignedAccessIf(); + + EIGEN_DEVICE_FUNC Scalar sum() const; + EIGEN_DEVICE_FUNC Scalar mean() const; + EIGEN_DEVICE_FUNC Scalar trace() const; + + EIGEN_DEVICE_FUNC Scalar prod() const; + + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff() const; + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff() const; + + // By default, the fastest version with undefined NaN propagation semantics is + // used. + // TODO(rmlarsen): Replace with default template argument when we move to + // c++11 or beyond. 
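+  // Illustrative aside (a sketch, assuming the Eigen 3.4 NaN-propagation tags):
+  //   Eigen::Vector3d v(1.0, std::nan(""), 3.0);
+  //   double a = v.minCoeff<Eigen::PropagateNumbers>();  // 1.0, the NaN is skipped
+  //   double b = v.minCoeff<Eigen::PropagateNaN>();      // NaN, the NaN wins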
+ EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff() const { + return minCoeff(); + } + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff() const { + return maxCoeff(); + } + + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const; + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const; + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff(IndexType* index) const; + template + EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff(IndexType* index) const; + + // TODO(rmlarsen): Replace these methods with a default template argument. + template + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const { + return minCoeff(row, col); + } + template + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const { + return maxCoeff(row, col); + } + template + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff(IndexType* index) const { + return minCoeff(index); + } + template + EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff(IndexType* index) const { + return maxCoeff(index); + } + + template + EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const; + + template + EIGEN_DEVICE_FUNC void visit(Visitor& func) const; + + /** \returns a WithFormat proxy object allowing to print a matrix the with given + * format \a fmt. + * + * See class IOFormat for some examples. + * + * \sa class IOFormat, class WithFormat + */ + inline const WithFormat format(const IOFormat& fmt) const { return WithFormat(derived(), fmt); } + + /** \returns the unique coefficient of a 1x1 expression */ + EIGEN_DEVICE_FUNC CoeffReturnType value() const { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeff(0, 0); + } + + EIGEN_DEVICE_FUNC bool all() const; + EIGEN_DEVICE_FUNC bool any() const; + EIGEN_DEVICE_FUNC Index count() const; + + typedef VectorwiseOp RowwiseReturnType; + typedef const VectorwiseOp ConstRowwiseReturnType; + typedef VectorwiseOp ColwiseReturnType; + typedef const VectorwiseOp ConstColwiseReturnType; + + /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions + * + * Example: \include MatrixBase_rowwise.cpp + * Output: \verbinclude MatrixBase_rowwise.out + * + * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting + */ + // Code moved here due to a CUDA compiler bug + EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { return ConstRowwiseReturnType(derived()); } + EIGEN_DEVICE_FUNC RowwiseReturnType rowwise(); + + /** \returns a VectorwiseOp wrapper of *this broadcasting and partial reductions + * + * Example: \include MatrixBase_colwise.cpp + * Output: \verbinclude MatrixBase_colwise.out + * + * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting + */ + EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { return ConstColwiseReturnType(derived()); } + EIGEN_DEVICE_FUNC ColwiseReturnType colwise(); + + typedef CwiseNullaryOp, PlainObject> RandomReturnType; + static const RandomReturnType Random(Index rows, Index cols); + static const RandomReturnType Random(Index size); + static const RandomReturnType Random(); + + template + inline EIGEN_DEVICE_FUNC + CwiseTernaryOp::Scalar, + typename DenseBase::Scalar, Scalar>, + ThenDerived, 
ElseDerived, Derived> + select(const DenseBase& thenMatrix, const DenseBase& elseMatrix) const; + + template + inline EIGEN_DEVICE_FUNC + CwiseTernaryOp::Scalar, + typename DenseBase::Scalar, Scalar>, + ThenDerived, typename DenseBase::ConstantReturnType, Derived> + select(const DenseBase& thenMatrix, const typename DenseBase::Scalar& elseScalar) const; + + template + inline EIGEN_DEVICE_FUNC + CwiseTernaryOp::Scalar, + typename DenseBase::Scalar, Scalar>, + typename DenseBase::ConstantReturnType, ElseDerived, Derived> + select(const typename DenseBase::Scalar& thenScalar, const DenseBase& elseMatrix) const; + + template + RealScalar lpNorm() const; + + template + EIGEN_DEVICE_FUNC const Replicate replicate() const; + /** + * \return an expression of the replication of \c *this + * + * Example: \include MatrixBase_replicate_int_int.cpp + * Output: \verbinclude MatrixBase_replicate_int_int.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate + */ + // Code moved here due to a CUDA compiler bug + EIGEN_DEVICE_FUNC const Replicate replicate(Index rowFactor, Index colFactor) const { + return Replicate(derived(), rowFactor, colFactor); + } + + typedef Reverse ReverseReturnType; + typedef const Reverse ConstReverseReturnType; + EIGEN_DEVICE_FUNC ReverseReturnType reverse(); + /** This is the const version of reverse(). */ + // Code moved here due to a CUDA compiler bug + EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const { return ConstReverseReturnType(derived()); } + EIGEN_DEVICE_FUNC void reverseInPlace(); + +#ifdef EIGEN_PARSED_BY_DOXYGEN + /** STL-like RandomAccessIterator + * iterator type as returned by the begin() and end() methods. + */ + typedef random_access_iterator_type iterator; + /** This is the const version of iterator (aka read-only) */ + typedef random_access_iterator_type const_iterator; +#else + typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit, + internal::pointer_based_stl_iterator, + internal::generic_randaccess_stl_iterator > + iterator_type; + + typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit, + internal::pointer_based_stl_iterator, + internal::generic_randaccess_stl_iterator > + const_iterator_type; + + // Stl-style iterators are supported only for vectors. 
+ + typedef std::conditional_t iterator; + + typedef std::conditional_t const_iterator; +#endif + + inline iterator begin(); + inline const_iterator begin() const; + inline const_iterator cbegin() const; + inline iterator end(); + inline const_iterator end() const; + inline const_iterator cend() const; + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase +#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) +#define EIGEN_DOC_UNARY_ADDONS(X, Y) +#include "../plugins/CommonCwiseUnaryOps.inc" +#include "../plugins/BlockMethods.inc" +#include "../plugins/IndexedViewMethods.inc" +#include "../plugins/ReshapedMethods.inc" +#ifdef EIGEN_DENSEBASE_PLUGIN +#include EIGEN_DENSEBASE_PLUGIN +#endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL +#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF +#undef EIGEN_DOC_UNARY_ADDONS + + // disable the use of evalTo for dense objects with a nice compilation error + template + EIGEN_DEVICE_FUNC inline void evalTo(Dest&) const { + EIGEN_STATIC_ASSERT((internal::is_same::value), + THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); + } + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase) + /** Default constructor. Do nothing. */ +#ifdef EIGEN_INTERNAL_DEBUGGING + EIGEN_DEVICE_FUNC constexpr DenseBase() { + /* Just checks for self-consistency of the flags. + * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down + */ + EIGEN_STATIC_ASSERT( + (internal::check_implication(MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1, int(IsRowMajor)) && + internal::check_implication(MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1, int(!IsRowMajor))), + INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) + } +#else + EIGEN_DEVICE_FUNC constexpr DenseBase() = default; +#endif + + private: + EIGEN_DEVICE_FUNC explicit DenseBase(int); + EIGEN_DEVICE_FUNC DenseBase(int, int); + template + EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase&); +}; + +/** Free-function swap. + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + // Use forwarding references to capture all combinations of cv-qualified l+r-value cases. + std::enable_if_t>, std::decay_t>::value && + std::is_base_of>, std::decay_t>::value, + void> + swap(DerivedA&& a, DerivedB&& b) { + a.swap(b); +} + +} // end namespace Eigen + +#endif // EIGEN_DENSEBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h new file mode 100644 index 0000000000000000000000000000000000000000..97f9b50f3456350e06736d6d101771086af12be4 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseCoeffsBase.h @@ -0,0 +1,569 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
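+// Illustrative aside on the STL-style iterators declared in DenseBase above (a
+// sketch; dense coefficient iteration is provided for vector expressions only):
+//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(4, 0.0, 3.0);
+//   double sum = std::accumulate(v.begin(), v.end(), 0.0);  // needs <numeric>
+//   for (double& x : v) x *= 2.0;                           // range-based for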
+ +#ifndef EIGEN_DENSECOEFFSBASE_H +#define EIGEN_DENSECOEFFSBASE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct add_const_on_value_type_if_arithmetic { + typedef std::conditional_t::value, T, add_const_on_value_type_t> type; +}; +} // namespace internal + +/** \brief Base class providing read-only coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #ReadOnlyAccessors Constant indicating read-only access + * + * This class defines the \c operator() \c const function and friends, which can be used to read specific + * entries of a matrix or array. + * + * \sa DenseCoeffsBase, DenseCoeffsBase, + * \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public EigenBase { + public: + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + + // Explanation for this CoeffReturnType typedef. + // - This is the return type of the coeff() method. + // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references + // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value). + // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems + // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is + // not possible, since the underlying expressions might not offer a valid address the reference could be referring to. + typedef std::conditional_t::Flags& LvalueBit), const Scalar&, + std::conditional_t::value, Scalar, const Scalar>> + CoeffReturnType; + + typedef typename internal::add_const_on_value_type_if_arithmetic::type>::type + PacketReturnType; + + typedef EigenBase Base; + using Base::cols; + using Base::derived; + using Base::rows; + using Base::size; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const { + return int(Derived::RowsAtCompileTime) == 1 ? 0 + : int(Derived::ColsAtCompileTime) == 1 ? inner + : int(Derived::Flags) & RowMajorBit ? outer + : inner; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const { + return int(Derived::ColsAtCompileTime) == 1 ? 0 + : int(Derived::RowsAtCompileTime) == 1 ? inner + : int(Derived::Flags) & RowMajorBit ? inner + : outer; + } + + /** Short version: don't use this function, use + * \link operator()(Index,Index) const \endlink instead. + * + * Long version: this function is similar to + * \link operator()(Index,Index) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(Index,Index) const \endlink. 
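The comments above distinguish the asserted accessors (operator(), operator[]) from the unchecked coeff()/coeffRef() fast path, and explain why CoeffReturnType is a const reference only for lvalue expressions. A small illustration using the standard Eigen API, not part of the patch:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Identity();

  // operator()(row, col) bounds-checks with eigen_assert (active in debug
  // builds); coeff(row, col) skips the check unless EIGEN_INTERNAL_DEBUGGING
  // is defined, which is why the comments above reserve it for hot loops
  // whose indices are already known to be in range.
  double a = m(1, 1);        // asserted access
  double b = m.coeff(2, 2);  // unchecked access

  // CoeffReturnType is const Scalar& for lvalue expressions such as a plain
  // Matrix, but a plain Scalar for expressions with no stored coefficients:
  double c = (m + m).coeff(0, 0);  // computed on the fly, returned by value

  // operator[] and the x()/y()/z()/w() shorthands are restricted to vectors
  // by the static asserts shown in this file.
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  v.x() = 7.0;  // writable via the WriteAccessors specialization further down
  std::cout << a << ' ' << b << ' ' << c << ' ' << v[0] << '\n';  // 1 1 2 7
}
```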
+ * + * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType coeff(Index row, Index col) const { + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return internal::evaluator(derived()).coeff(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType coeffByOuterInner(Index outer, + Index inner) const { + return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); + } + + /** \returns the coefficient at given the given row and column. + * + * \sa operator()(Index,Index), operator[](Index) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType operator()(Index row, Index col) const { + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return coeff(row, col); + } + + /** Short version: don't use this function, use + * \link operator[](Index) const \endlink instead. + * + * Long version: this function is similar to + * \link operator[](Index) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameter \a index is in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](Index) const \endlink. + * + * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType coeff(Index index) const { + EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) + eigen_internal_assert(index >= 0 && index < size()); + return internal::evaluator(derived()).coeff(index); + } + + /** \returns the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, + * z() const, w() const + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType operator[](Index index) const { + EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) + eigen_assert(index >= 0 && index < size()); + return coeff(index); + } + + /** \returns the coefficient at given index. + * + * This is synonymous to operator[](Index) const. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, + * z() const, w() const + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType operator()(Index index) const { + eigen_assert(index >= 0 && index < size()); + return coeff(index); + } + + /** equivalent to operator[](0). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType x() const { return (*this)[0]; } + + /** equivalent to operator[](1). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType y() const { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS); + return (*this)[1]; + } + + /** equivalent to operator[](2). 
*/ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType z() const { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS); + return (*this)[2]; + } + + /** equivalent to operator[](3). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType w() const { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS); + return (*this)[3]; + } + + /** \internal + * \returns the packet of coefficients starting at the given row and column. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const { + typedef typename internal::packet_traits::type DefaultPacketType; + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return internal::evaluator(derived()).template packet(row, col); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const { + return packet(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); + } + + /** \internal + * \returns the packet of coefficients starting at the given index. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit and the LinearAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { + EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) + typedef typename internal::packet_traits::type DefaultPacketType; + eigen_internal_assert(index >= 0 && index < size()); + return internal::evaluator(derived()).template packet(index); + } + + protected: + // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase. + // But some methods are only available in the DirectAccess case. + // So we add dummy methods here with these names, so that "using... " doesn't fail. + // It's not private so that the child class DenseBase can access them, and it's not public + // either since it's an implementation detail, so has to be protected. + void coeffRef(); + void coeffRefByOuterInner(); + void writePacket(); + void writePacketByOuterInner(); + void copyCoeff(); + void copyCoeffByOuterInner(); + void copyPacket(); + void copyPacketByOuterInner(); + void stride(); + void innerStride(); + void outerStride(); + void rowStride(); + void colStride(); +}; + +/** \brief Base class providing read/write coefficient access to matrices and arrays. 
+ * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #WriteAccessors Constant indicating read/write access + * + * This class defines the non-const \c operator() function and friends, which can be used to write specific + * entries of a matrix or array. This class inherits DenseCoeffsBase which + * defines the const variant for reading specific entries. + * + * \sa DenseCoeffsBase, \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase { + public: + typedef DenseCoeffsBase Base; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + using Base::coeff; + using Base::colIndexByOuterInner; + using Base::cols; + using Base::derived; + using Base::rowIndexByOuterInner; + using Base::rows; + using Base::size; + using Base::operator[]; + using Base::operator(); + using Base::w; + using Base::x; + using Base::y; + using Base::z; + + /** Short version: don't use this function, use + * \link operator()(Index,Index) \endlink instead. + * + * Long version: this function is similar to + * \link operator()(Index,Index) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(Index,Index) \endlink. + * + * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index row, Index col) { + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return internal::evaluator(derived()).coeffRef(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner) { + return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); + } + + /** \returns a reference to the coefficient at given the given row and column. + * + * \sa operator[](Index) + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator()(Index row, Index col) { + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return coeffRef(row, col); + } + + /** Short version: don't use this function, use + * \link operator[](Index) \endlink instead. + * + * Long version: this function is similar to + * \link operator[](Index) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](Index) \endlink. + * + * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index index) { + EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) + eigen_internal_assert(index >= 0 && index < size()); + return internal::evaluator(derived()).coeffRef(index); + } + + /** \returns a reference to the coefficient at given index. 
+ * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator[](Index index) { + EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + /** \returns a reference to the coefficient at given index. + * + * This is synonymous to operator[](Index). + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() + */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& operator()(Index index) { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + /** equivalent to operator[](0). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& x() { return (*this)[0]; } + + /** equivalent to operator[](1). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& y() { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS); + return (*this)[1]; + } + + /** equivalent to operator[](2). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& z() { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS); + return (*this)[2]; + } + + /** equivalent to operator[](3). */ + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& w() { + EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS); + return (*this)[3]; + } +}; + +/** \brief Base class providing direct read-only coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #DirectAccessors Constant indicating direct access + * + * This class defines functions to work with strides which can be used to access entries directly. This class + * inherits DenseCoeffsBase which defines functions to access entries read-only using + * \c operator() . + * + * \sa \blank \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase { + public: + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::cols; + using Base::derived; + using Base::rows; + using Base::size; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. + * + * \sa outerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return derived().innerStride(); } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { return derived().outerStride(); } + + // FIXME shall we remove it ? + EIGEN_CONSTEXPR inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); } + + /** \returns the pointer increment between two consecutive rows. 
+ * + * \sa innerStride(), outerStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rowStride() const { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index colStride() const { + return Derived::IsRowMajor ? innerStride() : outerStride(); + } +}; + +/** \brief Base class providing direct read/write coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * + * \note #DirectWriteAccessors Constant indicating direct access + * + * This class defines functions to work with strides which can be used to access entries directly. This class + * inherits DenseCoeffsBase which defines functions to access entries read/write using + * \c operator(). + * + * \sa \blank \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase { + public: + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::cols; + using Base::derived; + using Base::rows; + using Base::size; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. + * + * \sa outerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return derived().innerStride(); } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return derived().outerStride(); } + + // FIXME shall we remove it ? + EIGEN_CONSTEXPR inline Index stride() const EIGEN_NOEXCEPT { + return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); + } + + /** \returns the pointer increment between two consecutive rows. + * + * \sa innerStride(), outerStride(), colStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rowStride() const EIGEN_NOEXCEPT { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index colStride() const EIGEN_NOEXCEPT { + return Derived::IsRowMajor ? innerStride() : outerStride(); + } +}; + +namespace internal { + +template +struct first_aligned_impl { + static EIGEN_CONSTEXPR inline Index run(const Derived&) EIGEN_NOEXCEPT { return 0; } +}; + +template +struct first_aligned_impl { + static inline Index run(const Derived& m) { return internal::first_aligned(m.data(), m.size()); } +}; + +/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect + * to \a Alignment for vectorization. + * + * \tparam Alignment requested alignment in Bytes. + * + * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more + * documentation. 
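The innerStride()/outerStride() pair and the rowStride()/colStride() helpers above are the basis for direct (pointer-arithmetic) access. A quick sketch of what they report for a plain column-major matrix and a block view; standard Eigen usage, not from the patch:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Column-major 4x5 matrix: innerStride is 1 (consecutive coefficients in a
  // column are adjacent), outerStride is 4 (distance between column starts).
  Eigen::MatrixXd m(4, 5);
  std::cout << m.innerStride() << ' ' << m.outerStride() << '\n';  // 1 4

  // A block shares the outer stride of its parent; this is how the
  // stride-based accessors defined above let Map/Block address raw memory.
  auto blk = m.block(1, 1, 2, 3);
  std::cout << blk.innerStride() << ' ' << blk.outerStride() << '\n';  // 1 4

  // rowStride()/colStride() fold the storage order into one of the two:
  // for this column-major matrix, rowStride == innerStride == 1 and
  // colStride == outerStride == 4.
  std::cout << m.rowStride() << ' ' << m.colStride() << '\n';  // 1 4
}
```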
+ */ +template +static inline Index first_aligned(const DenseBase& m) { + enum { ReturnZero = (int(evaluator::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) }; + return first_aligned_impl::run(m.derived()); +} + +template +static inline Index first_default_aligned(const DenseBase& m) { + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type DefaultPacketType; + return internal::first_aligned::alignment), Derived>(m); +} + +template ::ret> +struct inner_stride_at_compile_time { + enum { ret = traits::InnerStrideAtCompileTime }; +}; + +template +struct inner_stride_at_compile_time { + enum { ret = 0 }; +}; + +template ::ret> +struct outer_stride_at_compile_time { + enum { ret = traits::OuterStrideAtCompileTime }; +}; + +template +struct outer_stride_at_compile_time { + enum { ret = 0 }; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DENSECOEFFSBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseStorage.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseStorage.h new file mode 100644 index 0000000000000000000000000000000000000000..d62586c99b5af40c61f2578903fb51edf393275b --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DenseStorage.h @@ -0,0 +1,578 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2010-2013 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIXSTORAGE_H +#define EIGEN_MATRIXSTORAGE_H + +#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN +#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) \ + X; \ + EIGEN_DENSE_STORAGE_CTOR_PLUGIN; +#else +#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) +#endif + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) +#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment) +#else +#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment) \ + eigen_assert((is_constant_evaluated() || (std::uintptr_t(array) % Alignment == 0)) && \ + "this assertion is explained here: " \ + "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ + " **** READ THIS WEB PAGE !!! ****"); +#endif + +#if EIGEN_STACK_ALLOCATION_LIMIT +#define EIGEN_MAKE_STACK_ALLOCATION_ASSERT(X) \ + EIGEN_STATIC_ASSERT(X <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG) +#else +#define EIGEN_MAKE_STACK_ALLOCATION_ASSERT(X) +#endif + +/** \internal + * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: + * to 16 bytes boundary if the total size is a multiple of 16 bytes. 
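plain_array is the stack-side storage used by fixed-size matrices; the alignment and stack-size asserts above guard it. The observable consequences can be sketched as follows (the printed values are typical for a 16-byte-packet build and may differ by platform):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Fixed-size types keep their coefficients in a plain_array member on the
  // stack; when the payload is a multiple of 16 bytes, Eigen requests
  // over-alignment so packet (SIMD) loads can be aligned.
  std::cout << alignof(Eigen::Vector4f) << '\n';  // typically 16
  std::cout << sizeof(Eigen::Vector4f) << '\n';   // 16: no pointer, no size fields

  // Vector3f is 12 bytes, not a multiple of 16, so no over-alignment is
  // requested and sizeof stays 12.
  std::cout << alignof(Eigen::Vector3f) << ' ' << sizeof(Eigen::Vector3f) << '\n';

  // Dynamic-size types store a heap pointer plus runtime dimensions instead,
  // so sizeof is small and independent of the number of coefficients.
  std::cout << sizeof(Eigen::MatrixXf) << '\n';  // pointer + two Index fields

  // Mixed types store only the runtime dimension: Matrix<float, Dynamic, 3>
  // keeps a pointer plus m_rows, while the column count stays compile-time.
  std::cout << sizeof(Eigen::Matrix<float, Eigen::Dynamic, 3>) << '\n';
  return 0;
}
```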
+ */ + +template ::value> +struct plain_array { + EIGEN_ALIGN_TO_BOUNDARY(Alignment) T array[Size]; +#if defined(EIGEN_NO_DEBUG) || defined(EIGEN_TESTING_PLAINOBJECT_CTOR) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default; +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() { + EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment) + EIGEN_MAKE_STACK_ALLOCATION_ASSERT(Size * sizeof(T)) + } +#endif +}; + +template +struct plain_array { + T array[Size]; +#if defined(EIGEN_NO_DEBUG) || defined(EIGEN_TESTING_PLAINOBJECT_CTOR) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default; +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() { EIGEN_MAKE_STACK_ALLOCATION_ASSERT(Size * sizeof(T)) } +#endif +}; + +template +struct plain_array { + T array[1]; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default; +}; + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap_plain_array(plain_array& a, + plain_array& b, + Index a_size, Index b_size) { + Index common_size = numext::mini(a_size, b_size); + std::swap_ranges(a.array, a.array + common_size, b.array); + if (a_size > b_size) + smart_copy(a.array + common_size, a.array + a_size, b.array + common_size); + else if (b_size > a_size) + smart_copy(b.array + common_size, b.array + b_size, a.array + common_size); +} + +template +class DenseStorage_impl { + plain_array m_data; + + public: +#ifndef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default; +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) + smart_copy(other.m_data.array, other.m_data.array + Size, m_data.array); + } +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index /*cols*/) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) { + numext::swap(m_data, other.m_data); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, + Index /*cols*/) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index /*cols*/) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; } +}; +template +class DenseStorage_impl { + plain_array m_data; + Index m_rows = 0; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) + : m_rows(other.m_rows) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size()) + smart_copy(other.m_data.array, other.m_data.array + 
other.size(), m_data.array); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index /*cols*/) + : m_rows(rows) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + EIGEN_UNUSED_VARIABLE(size) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) { + smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array); + m_rows = other.m_rows; + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) { + swap_plain_array(m_data, other.m_data, size(), other.size()); + numext::swap(m_rows, other.m_rows); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index /*cols*/) { + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index /*cols*/) { + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; } +}; +template +class DenseStorage_impl { + plain_array m_data; + Index m_cols = 0; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) + : m_cols(other.m_cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size()) + smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index /*rows*/, Index cols) + : m_cols(cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + EIGEN_UNUSED_VARIABLE(size) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) { + smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array); + m_cols = other.m_cols; + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) { + swap_plain_array(m_data, other.m_data, size(), other.size()); + numext::swap(m_cols, other.m_cols); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, Index cols) { + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index cols) { + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; } +}; +template +class DenseStorage_impl { + plain_array m_data; + Index m_rows = 0; + Index m_cols = 0; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) + : m_rows(other.m_rows), 
m_cols(other.m_cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size()) + smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index cols) + : m_rows(rows), m_cols(cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + EIGEN_UNUSED_VARIABLE(size) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) { + smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array); + m_rows = other.m_rows; + m_cols = other.m_cols; + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) { + swap_plain_array(m_data, other.m_data, size(), other.size()); + numext::swap(m_rows, other.m_rows); + numext::swap(m_cols, other.m_cols); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index cols) { + m_rows = rows; + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index cols) { + m_rows = rows; + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; } +}; +// null matrix variants +template +class DenseStorage_impl { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index /*cols*/) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl&) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, + Index /*cols*/) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index /*cols*/) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; } +}; +template +class DenseStorage_impl { + Index m_rows = 0; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index rows, Index /*cols*/) + : m_rows(rows) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept { + numext::swap(m_rows, other.m_rows); + } + 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index /*cols*/) { + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index /*cols*/) { + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; } +}; +template +class DenseStorage_impl { + Index m_cols = 0; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index cols) + : m_cols(cols) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept { + numext::swap(m_cols, other.m_cols); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, Index cols) { + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index cols) { + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; } +}; +template +class DenseStorage_impl { + Index m_rows = 0; + Index m_cols = 0; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index rows, Index cols) + : m_rows(rows), m_cols(cols) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept { + numext::swap(m_rows, other.m_rows); + numext::swap(m_cols, other.m_cols); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index cols) { + m_rows = rows; + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index cols) { + m_rows = rows; + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; } +}; +// fixed-size matrix with 
dynamic memory allocation not currently supported +template +class DenseStorage_impl {}; +// dynamic-sized variants +template +class DenseStorage_impl { + static constexpr bool Align = (Options & DontAlign) == 0; + T* m_data = nullptr; + Index m_rows = 0; + + public: + static constexpr int Size = Dynamic; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) + : m_data(conditional_aligned_new_auto(other.size())), m_rows(other.m_rows) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size()) + smart_copy(other.m_data, other.m_data + other.size(), m_data); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index /*cols*/) + : m_data(conditional_aligned_new_auto(size)), m_rows(rows) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept + : m_data(other.m_data), m_rows(other.m_rows) { + other.m_data = nullptr; + other.m_rows = 0; + } + EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto(m_data, size()); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) { + resize(other.size(), other.rows(), other.cols()); + smart_copy(other.m_data, other.m_data + other.size(), m_data); + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept { + this->swap(other); + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept { + numext::swap(m_data, other.m_data); + numext::swap(m_rows, other.m_rows); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index rows, Index /*cols*/) { + m_data = conditional_aligned_realloc_new_auto(m_data, size, this->size()); + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index rows, Index /*cols*/) { + Index oldSize = this->size(); + if (oldSize != size) { + conditional_aligned_delete_auto(m_data, oldSize); + m_data = conditional_aligned_new_auto(size); + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_rows = rows; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; } +}; +template +class DenseStorage_impl { + static constexpr bool Align = (Options & DontAlign) == 0; + T* m_data = nullptr; + Index m_cols = 0; + + public: + static constexpr int Size = Dynamic; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) + : m_data(conditional_aligned_new_auto(other.size())), m_cols(other.m_cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size()) + smart_copy(other.m_data, other.m_data + other.size(), m_data); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index /*rows*/, Index cols) + : m_data(conditional_aligned_new_auto(size)), 
m_cols(cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept + : m_data(other.m_data), m_cols(other.m_cols) { + other.m_data = nullptr; + other.m_cols = 0; + } + EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto(m_data, size()); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) { + resize(other.size(), other.rows(), other.cols()); + smart_copy(other.m_data, other.m_data + other.size(), m_data); + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept { + this->swap(other); + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept { + numext::swap(m_data, other.m_data); + numext::swap(m_cols, other.m_cols); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index /*rows*/, Index cols) { + m_data = conditional_aligned_realloc_new_auto(m_data, size, this->size()); + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index /*rows*/, Index cols) { + Index oldSize = this->size(); + if (oldSize != size) { + conditional_aligned_delete_auto(m_data, oldSize); + m_data = conditional_aligned_new_auto(size); + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_cols = cols; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; } +}; +template +class DenseStorage_impl { + static constexpr bool Align = (Options & DontAlign) == 0; + T* m_data = nullptr; + Index m_rows = 0; + Index m_cols = 0; + + public: + static constexpr int Size = Dynamic; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) + : m_data(conditional_aligned_new_auto(other.size())), m_rows(other.m_rows), m_cols(other.m_cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size()) + smart_copy(other.m_data, other.m_data + other.size(), m_data); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index cols) + : m_data(conditional_aligned_new_auto(size)), m_rows(rows), m_cols(cols) { + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept + : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) { + other.m_data = nullptr; + other.m_rows = 0; + other.m_cols = 0; + } + EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto(m_data, size()); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) { + resize(other.size(), other.rows(), other.cols()); + smart_copy(other.m_data, other.m_data + other.size(), m_data); + return *this; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept { + this->swap(other); + return *this; + } + 
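The move constructor and move assignment above transfer the heap pointer and null out the source, so moving a dynamic-size object never copies coefficients. A sketch of the user-visible effect; standard Eigen usage, not from the patch:

```cpp
#include <Eigen/Dense>
#include <iostream>
#include <utility>

int main() {
  // The move constructor above steals m_data and zeroes out the source, so
  // moving a heap-backed matrix is O(1) regardless of its size.
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(1000, 1000);
  const double* before = a.data();

  Eigen::MatrixXd b = std::move(a);  // pointer handoff, no allocation

  std::cout << (b.data() == before) << '\n';  // 1: same heap block
  std::cout << a.size() << '\n';              // 0: source left empty
}
```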
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
+    numext::swap(m_data, other.m_data);
+    numext::swap(m_rows, other.m_rows);
+    numext::swap(m_cols, other.m_cols);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index rows, Index cols) {
+    m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
+    m_rows = rows;
+    m_cols = cols;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index rows, Index cols) {
+    Index oldSize = this->size();
+    if (oldSize != size) {
+      conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
+      m_data = conditional_aligned_new_auto<T, Align>(size);
+      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
+    }
+    m_rows = rows;
+    m_cols = cols;
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
+};
+template <typename T, int Size, int Rows, int Cols>
+struct use_default_move {
+  static constexpr bool DynamicObject = Size == Dynamic;
+  static constexpr bool TrivialObject =
+      (!NumTraits<T>::RequireInitialization) && (Rows >= 0) && (Cols >= 0) && (Size == Rows * Cols);
+  static constexpr bool value = DynamicObject || TrivialObject;
+};
+} // end namespace internal
+
+/** \internal
+ *
+ * \class DenseStorage_impl
+ * \ingroup Core_Module
+ *
+ * \brief Stores the data of a matrix
+ *
+ * This class stores the data of fixed-size, dynamic-size or mixed matrices
+ * in a way as compact as possible.
+ *
+ * \sa Matrix
+ */
+template <typename T, int Size, int Rows, int Cols, int Options,
+          bool UseDefaultMove = internal::use_default_move<T, Size, Rows, Cols>::value>
+class DenseStorage : public internal::DenseStorage_impl<T, Size, Rows, Cols, Options> {
+  using Base = internal::DenseStorage_impl<T, Size, Rows, Cols, Options>;
+
+ public:
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage() = default;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(const DenseStorage&) = default;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(Index size, Index rows, Index cols)
+      : Base(size, rows, cols) {}
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(const DenseStorage&) = default;
+  // if DenseStorage meets the requirements of use_default_move, then use the move construction and move assignment
+  // operation defined in DenseStorage_impl, or the compiler-generated version if none is defined
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(DenseStorage&&) = default;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(DenseStorage&&) = default;
+};
+template <typename T, int Size, int Rows, int Cols, int Options>
+class DenseStorage<T, Size, Rows, Cols, Options, false>
+    : public internal::DenseStorage_impl<T, Size, Rows, Cols, Options> {
+  using Base = internal::DenseStorage_impl<T, Size, Rows, Cols, Options>;
+
+ public:
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage() = default;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(const DenseStorage&) = default;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(Index size, Index rows, Index cols)
+      : Base(size, rows, cols) {}
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(const DenseStorage&) = default;
+  // if DenseStorage does not meet the requirements of use_default_move, then defer to the copy construction and copy
+  // assignment behavior
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(DenseStorage&& other)
+      : DenseStorage(static_cast<const DenseStorage&>(other)) {}
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(DenseStorage&& other) {
+    *this = other;
+    return *this;
+  }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIXSTORAGE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DeviceWrapper.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DeviceWrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ae8256f573d9cc2c4e1a33442e15b4f5c1bf7f4
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DeviceWrapper.h
@@ -0,0 +1,155 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2023 Charlie Schlosser
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
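The DeviceWrapper header that follows adds an EigenBase::device(Device&) entry point so a dense assignment can be dispatched to a device-specific kernel, mirroring the Tensor module's `c.device(d) = expr` idiom. A hedged sketch of the intended call pattern: ThreadPool/ThreadPoolDevice are assumed to come from Eigen's unsupported Tensor module, and actual parallel execution depends on a dense_assignment_loop_with_device specialization existing for the device in question.

```cpp
#define EIGEN_USE_THREADS  // enables ThreadPoolDevice in the Tensor module
#include <unsupported/Eigen/CXX11/Tensor>
#include <Eigen/Dense>

int main() {
  Eigen::ThreadPool pool(4);                 // assumed: unsupported-module type
  Eigen::ThreadPoolDevice device(&pool, 4);  // assumed: unsupported-module type

  Eigen::MatrixXf a = Eigen::MatrixXf::Random(2048, 2048);
  Eigen::MatrixXf b = Eigen::MatrixXf::Random(2048, 2048);
  Eigen::MatrixXf c(2048, 2048);

  // device() wraps the destination in a DeviceWrapper; the operator= defined
  // below then routes through call_assignment_no_alias, which forwards the
  // device to the assignment kernel selected by AssignmentWithDevice.
  c.device(device) = a + b;
  return 0;
}
```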
+ +#ifndef EIGEN_DEVICEWRAPPER_H +#define EIGEN_DEVICEWRAPPER_H + +namespace Eigen { +template +struct DeviceWrapper { + using Base = EigenBase>; + using Scalar = typename Derived::Scalar; + + EIGEN_DEVICE_FUNC DeviceWrapper(Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {} + EIGEN_DEVICE_FUNC DeviceWrapper(const Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase& other) { + using AssignOp = internal::assign_op; + internal::call_assignment(*this, other.derived(), AssignOp()); + return m_xpr; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const EigenBase& other) { + using AddAssignOp = internal::add_assign_op; + internal::call_assignment(*this, other.derived(), AddAssignOp()); + return m_xpr; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const EigenBase& other) { + using SubAssignOp = internal::sub_assign_op; + internal::call_assignment(*this, other.derived(), SubAssignOp()); + return m_xpr; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& derived() { return m_xpr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Device& device() { return m_device; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NoAlias noalias() { + return NoAlias(*this); + } + + Derived& m_xpr; + Device& m_device; +}; + +namespace internal { + +// this is where we differentiate between lazy assignment and specialized kernels (e.g. matrix products) +template ::Shape, + typename evaluator_traits::Shape>::Kind, + typename EnableIf = void> +struct AssignmentWithDevice; + +// unless otherwise specified, use the default product implementation +template +struct AssignmentWithDevice, Functor, Device, Dense2Dense, Weak> { + using SrcXprType = Product; + using Base = Assignment; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func, + Device&) { + Base::run(dst, src, func); + }; +}; + +// specialization for coeffcient-wise assignment +template +struct AssignmentWithDevice { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func, + Device& device) { +#ifndef EIGEN_NO_DEBUG + internal::check_for_aliasing(dst, src); +#endif + + call_dense_assignment_loop(dst, src, func, device); + } +}; + +// this allows us to use the default evaluation scheme if it is not specialized for the device +template +struct dense_assignment_loop_with_device { + using Base = dense_assignment_loop; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel, Device&) { Base::run(kernel); } +}; + +// entry point for a generic expression with device +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias(DeviceWrapper dst, + const Src& src, const Func& func) { + enum { + NeedToTranspose = ((int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) || + (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)) && + int(Dst::SizeAtCompileTime) != 1 + }; + + using ActualDstTypeCleaned = std::conditional_t, Dst>; + using ActualDstType = std::conditional_t, Dst&>; + ActualDstType actualDst(dst.derived()); + + // TODO check whether this is the right place to perform these checks: + EIGEN_STATIC_ASSERT_LVALUE(Dst) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned, Src) + EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename ActualDstTypeCleaned::Scalar, typename Src::Scalar); + + 
// this provides a mechanism for specializing simple assignments, matrix products, etc + AssignmentWithDevice::run(actualDst, src, func, dst.device()); +} + +// copy and pasted from AssignEvaluator except forward device to kernel +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_dense_assignment_loop(DstXprType& dst, + const SrcXprType& src, + const Functor& func, + Device& device) { + using DstEvaluatorType = evaluator; + using SrcEvaluatorType = evaluator; + + SrcEvaluatorType srcEvaluator(src); + + // NOTE To properly handle A = (A*A.transpose())/s with A rectangular, + // we need to resize the destination after the source evaluator has been created. + resize_if_allowed(dst, src, func); + + DstEvaluatorType dstEvaluator(dst); + + using Kernel = generic_dense_assignment_kernel; + + Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); + + dense_assignment_loop_with_device::run(kernel, device); +} + +} // namespace internal + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper EigenBase::device(Device& device) { + return DeviceWrapper(derived(), device); +} + +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper EigenBase::device( + Device& device) const { + return DeviceWrapper(derived(), device); +} +} // namespace Eigen +#endif diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Diagonal.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Diagonal.h new file mode 100644 index 0000000000000000000000000000000000000000..8d27857e0504e04a311120312aa92dbfe0237b4f --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Diagonal.h @@ -0,0 +1,221 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2009 Benoit Jacob +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DIAGONAL_H +#define EIGEN_DIAGONAL_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class Diagonal + * \ingroup Core_Module + * + * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix + * + * \tparam MatrixType the type of the object in which we are taking a sub/main/super diagonal + * \tparam DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. + * A positive value means a superdiagonal, a negative value means a subdiagonal. + * You can also use DynamicIndex so the index can be set at runtime. + * + * The matrix is not required to be square. + * + * This class represents an expression of the main diagonal, or any sub/super diagonal + * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the + * time this is the only way it is used. + * + * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) + */ + +namespace internal { +template +struct traits > : traits { + typedef typename ref_selector::type MatrixTypeNested; + typedef std::remove_reference_t MatrixTypeNested_; + typedef typename MatrixType::StorageKind StorageKind; + enum { + RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) + ? 
Dynamic + : (plain_enum_min(MatrixType::RowsAtCompileTime - plain_enum_max(-DiagIndex, 0), + MatrixType::ColsAtCompileTime - plain_enum_max(DiagIndex, 0))), + ColsAtCompileTime = 1, + MaxRowsAtCompileTime = + int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic + : DiagIndex == DynamicIndex + ? min_size_prefer_fixed(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) + : (plain_enum_min(MatrixType::MaxRowsAtCompileTime - plain_enum_max(-DiagIndex, 0), + MatrixType::MaxColsAtCompileTime - plain_enum_max(DiagIndex, 0))), + MaxColsAtCompileTime = 1, + MaskLvalueBit = is_lvalue::value ? LvalueBit : 0, + Flags = (unsigned int)MatrixTypeNested_::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & + ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions + MatrixTypeOuterStride = outer_stride_at_compile_time::ret, + InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride + 1, + OuterStrideAtCompileTime = 0 + }; +}; +} // namespace internal + +template +class Diagonal : public internal::dense_xpr_base >::type { + public: + enum { DiagIndex = DiagIndex_ }; + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) + + EIGEN_DEVICE_FUNC explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) + : m_matrix(matrix), m_index(a_index) { + eigen_assert(a_index <= m_matrix.cols() && -a_index <= m_matrix.rows()); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) + + EIGEN_DEVICE_FUNC inline Index rows() const { + return m_index.value() < 0 ? numext::mini(m_matrix.cols(), m_matrix.rows() + m_index.value()) + : numext::mini(m_matrix.rows(), m_matrix.cols() - m_index.value()); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return 1; } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { + return m_matrix.outerStride() + 1; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return 0; } + + typedef std::conditional_t::value, Scalar, const Scalar> ScalarWithConstIfNotLvalue; + + EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } + EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index) { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.coeffRef(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index) const { + return m_matrix.coeffRef(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index row, Index) const { + return m_matrix.coeff(row + rowOffset(), row + colOffset()); + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index idx) { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset()); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index idx) const { + return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset()); + } + + EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index idx) const { + return m_matrix.coeff(idx + rowOffset(), idx + colOffset()); + } + + EIGEN_DEVICE_FUNC inline const internal::remove_all_t& nestedExpression() const { + return m_matrix; + } + + EIGEN_DEVICE_FUNC inline Index index() const { return m_index.value(); } + + protected: + typename internal::ref_selector::non_const_type m_matrix; 
+ const internal::variable_if_dynamicindex m_index; + + private: + // some compilers may fail to optimize std::max etc in case of compile-time constants... + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index absDiagIndex() const EIGEN_NOEXCEPT { + return m_index.value() > 0 ? m_index.value() : -m_index.value(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowOffset() const EIGEN_NOEXCEPT { + return m_index.value() > 0 ? 0 : -m_index.value(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colOffset() const EIGEN_NOEXCEPT { + return m_index.value() > 0 ? m_index.value() : 0; + } + // trigger a compile-time error if someone try to call packet + template + typename MatrixType::PacketReturnType packet(Index) const; + template + typename MatrixType::PacketReturnType packet(Index, Index) const; +}; + +/** \returns an expression of the main diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * Example: \include MatrixBase_diagonal.cpp + * Output: \verbinclude MatrixBase_diagonal.out + * + * \sa class Diagonal */ +template +EIGEN_DEVICE_FUNC inline typename MatrixBase::DiagonalReturnType MatrixBase::diagonal() { + return DiagonalReturnType(derived()); +} + +/** This is the const version of diagonal(). */ +template +EIGEN_DEVICE_FUNC inline const typename MatrixBase::ConstDiagonalReturnType MatrixBase::diagonal() + const { + return ConstDiagonalReturnType(derived()); +} + +/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 + * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. + * + * Example: \include MatrixBase_diagonal_int.cpp + * Output: \verbinclude MatrixBase_diagonal_int.out + * + * \sa MatrixBase::diagonal(), class Diagonal */ +template +EIGEN_DEVICE_FUNC inline Diagonal MatrixBase::diagonal(Index index) { + return Diagonal(derived(), index); +} + +/** This is the const version of diagonal(Index). */ +template +EIGEN_DEVICE_FUNC inline const Diagonal MatrixBase::diagonal(Index index) const { + return Diagonal(derived(), index); +} + +/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 + * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. + * + * Example: \include MatrixBase_diagonal_template_int.cpp + * Output: \verbinclude MatrixBase_diagonal_template_int.out + * + * \sa MatrixBase::diagonal(), class Diagonal */ +template +template +EIGEN_DEVICE_FUNC inline Diagonal MatrixBase::diagonal() { + return Diagonal(derived()); +} + +/** This is the const version of diagonal(). 
+ */
+template <typename Derived>
+template <int DiagIndex>
+EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, DiagIndex> MatrixBase<Derived>::diagonal() const {
+  return Diagonal<const Derived, DiagIndex>(derived());
+}
+
+}  // end namespace Eigen
+
+#endif  // EIGEN_DIAGONAL_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DiagonalMatrix.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DiagonalMatrix.h
new file mode 100644
index 0000000000000000000000000000000000000000..248e4586f78f3006d1f17cea31fd8cc9b4abc608
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DiagonalMatrix.h
@@ -0,0 +1,417 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud
+// Copyright (C) 2007-2009 Benoit Jacob
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONALMATRIX_H
+#define EIGEN_DIAGONALMATRIX_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+/** \class DiagonalBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for diagonal matrices and expressions
+ *
+ * This is the base class that is inherited by diagonal matrix and related expression
+ * types, which internally use a vector for storing the diagonal entries. Diagonal
+ * types always represent square matrices.
+ *
+ * \tparam Derived is the derived type, a DiagonalMatrix or DiagonalWrapper.
+ *
+ * \sa class DiagonalMatrix, class DiagonalWrapper
+ */
+template <typename Derived>
+class DiagonalBase : public EigenBase<Derived> {
+ public:
+  typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
+  typedef typename DiagonalVectorType::Scalar Scalar;
+  typedef typename DiagonalVectorType::RealScalar RealScalar;
+  typedef typename internal::traits<Derived>::StorageKind StorageKind;
+  typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
+
+  enum {
+    RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+    MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+    IsVectorAtCompileTime = 0,
+    Flags = NoPreferredStorageOrderBit
+  };
+
+  typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime>
+      DenseMatrixType;
+  typedef DenseMatrixType DenseType;
+  typedef DiagonalMatrix<Scalar, DiagonalVectorType::SizeAtCompileTime, DiagonalVectorType::MaxSizeAtCompileTime>
+      PlainObject;
+
+  /** \returns a const reference to the derived object. */
+  EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+  /** \returns a reference to the derived object. */
+  EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+  /**
+   * Constructs a dense matrix from \c *this. Note, this directly returns a dense matrix type,
+   * not an expression.
+   * \returns A dense matrix, with its diagonal entries set from the derived object. */
+  EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { return derived(); }
+
+  /** \returns a const reference to the derived object's vector of diagonal coefficients. */
+  EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
+  /** \returns a reference to the derived object's vector of diagonal coefficients. */
+  EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
+
+  /** \returns the value of the coefficient as if \c *this was a dense matrix.
+   */
+  EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const {
+    eigen_assert(row >= 0 && col >= 0 && row < rows() && col < cols());
+    return row == col ? diagonal().coeff(row) : Scalar(0);
+  }
+
+  /** \returns the number of rows. */
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const { return diagonal().size(); }
+  /** \returns the number of columns. */
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const { return diagonal().size(); }
+
+  /** \returns the diagonal matrix product of \c *this by the dense matrix, \a matrix */
+  template <typename MatrixDerived>
+  EIGEN_DEVICE_FUNC const Product<Derived, MatrixDerived, LazyProduct> operator*(
+      const MatrixBase<MatrixDerived>& matrix) const {
+    return Product<Derived, MatrixDerived, LazyProduct>(derived(), matrix.derived());
+  }
+
+  template <typename OtherDerived>
+  using DiagonalProductReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
+      DiagonalVectorType, typename OtherDerived::DiagonalVectorType, product)>;
+
+  /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a other */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC const DiagonalProductReturnType<OtherDerived> operator*(
+      const DiagonalBase<OtherDerived>& other) const {
+    return diagonal().cwiseProduct(other.diagonal()).asDiagonal();
+  }
+
+  using DiagonalInverseReturnType =
+      DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType>>;
+
+  /** \returns the inverse of \c *this. Computed as the coefficient-wise inverse of the diagonal. */
+  EIGEN_DEVICE_FUNC inline const DiagonalInverseReturnType inverse() const {
+    return diagonal().cwiseInverse().asDiagonal();
+  }
+
+  using DiagonalScaleReturnType =
+      DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType, Scalar, product)>;
+
+  /** \returns the product of \c *this by the scalar \a scalar */
+  EIGEN_DEVICE_FUNC inline const DiagonalScaleReturnType operator*(const Scalar& scalar) const {
+    return (diagonal() * scalar).asDiagonal();
+  }
+
+  using ScaleDiagonalReturnType =
+      DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar, DiagonalVectorType, product)>;
+
+  /** \returns the product of a scalar and the diagonal matrix \a other */
+  EIGEN_DEVICE_FUNC friend inline const ScaleDiagonalReturnType operator*(const Scalar& scalar,
+                                                                          const DiagonalBase& other) {
+    return (scalar * other.diagonal()).asDiagonal();
+  }
+
+  template <typename OtherDerived>
+  using DiagonalSumReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
+      DiagonalVectorType, typename OtherDerived::DiagonalVectorType, sum)>;
+
+  /** \returns the sum of \c *this and the diagonal matrix \a other */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC inline const DiagonalSumReturnType<OtherDerived> operator+(
+      const DiagonalBase<OtherDerived>& other) const {
+    return (diagonal() + other.diagonal()).asDiagonal();
+  }
+
+  template <typename OtherDerived>
+  using DiagonalDifferenceReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
+      DiagonalVectorType, typename OtherDerived::DiagonalVectorType, difference)>;
+
+  /** \returns the difference of \c *this and the diagonal matrix \a other */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC inline const DiagonalDifferenceReturnType<OtherDerived> operator-(
+      const DiagonalBase<OtherDerived>& other) const {
+    return (diagonal() - other.diagonal()).asDiagonal();
+  }
+};
+
+/** \class DiagonalMatrix
+ * \ingroup Core_Module
+ *
+ * \brief Represents a diagonal matrix with its storage
+ *
+ * \tparam Scalar_ the type of coefficients
+ * \tparam SizeAtCompileTime the dimension of the matrix, or Dynamic
+ * \tparam MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
+ * to SizeAtCompileTime. Most of the time, you do not need to specify it.
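+ *
+ * A minimal usage sketch (illustrative, relying only on the API declared in this file):
+ * \code
+ * Eigen::DiagonalMatrix<double, 3> d(1.0, 2.0, 3.0);  // 3x3 diagonal matrix
+ * Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+ * Eigen::Matrix3d rowScaled = d * m;          // row i of m scaled by the i-th diagonal entry
+ * Eigen::Matrix3d dense = d.toDenseMatrix();  // materialize as a dense matrix
+ * \endcode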
+ * + * \sa class DiagonalBase, class DiagonalWrapper + */ + +namespace internal { +template +struct traits> + : traits> { + typedef Matrix DiagonalVectorType; + typedef DiagonalShape StorageKind; + enum { Flags = LvalueBit | NoPreferredStorageOrderBit | NestByRefBit }; +}; +} // namespace internal +template +class DiagonalMatrix : public DiagonalBase> { + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; + typedef const DiagonalMatrix& Nested; + typedef Scalar_ Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; +#endif + + protected: + DiagonalVectorType m_diagonal; + + public: + /** const version of diagonal(). */ + EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return m_diagonal; } + /** \returns a reference to the stored vector of diagonal coefficients. */ + EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return m_diagonal; } + + /** Default constructor without initialization */ + EIGEN_DEVICE_FUNC inline DiagonalMatrix() {} + + /** Constructs a diagonal matrix with given dimension */ + EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} + + /** 2D constructor. */ + EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x, y) {} + + /** 3D constructor. */ + EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x, y, z) {} + + /** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients. + * + * \warning To construct a diagonal matrix of fixed size, the number of values passed to this + * constructor must match the fixed dimension of \c *this. + * + * \sa DiagonalMatrix(const Scalar&, const Scalar&) + * \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, + const ArgTypes&... args) + : m_diagonal(a0, a1, a2, args...) {} + + /** \brief Constructs a DiagonalMatrix and initializes it by elements given by an initializer list of initializer + * lists \cpp11 + */ + EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE DiagonalMatrix( + const std::initializer_list>& list) + : m_diagonal(list) {} + + /** \brief Constructs a DiagonalMatrix from an r-value diagonal vector type */ + EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(DiagonalVectorType&& diag) : m_diagonal(std::move(diag)) {} + + /** Copy constructor. */ + template + EIGEN_DEVICE_FUNC inline DiagonalMatrix(const DiagonalBase& other) : m_diagonal(other.diagonal()) {} + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */ + inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {} +#endif + + /** generic constructor from expression of the diagonal coefficients */ + template + EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(const MatrixBase& other) : m_diagonal(other) {} + + /** Copy operator. */ + template + EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalBase& other) { + m_diagonal = other.diagonal(); + return *this; + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. 
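+   * For instance (an illustrative sketch), with two objects of the same type,
+   * \code
+   * DiagonalMatrix<double, 3> a, b;
+   * a = b;  // must resolve to this overload and copy b's diagonal
+   * \endcode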
+   */
+  EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalMatrix& other) {
+    m_diagonal = other.diagonal();
+    return *this;
+  }
+#endif
+
+  typedef DiagonalWrapper<const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, DiagonalVectorType>>
+      InitializeReturnType;
+
+  typedef DiagonalWrapper<const CwiseNullaryOp<internal::scalar_zero_op<Scalar>, DiagonalVectorType>>
+      ZeroInitializeReturnType;
+
+  /** Initializes a diagonal matrix of size SizeAtCompileTime with coefficients set to zero */
+  EIGEN_DEVICE_FUNC static const ZeroInitializeReturnType Zero() { return DiagonalVectorType::Zero().asDiagonal(); }
+  /** Initializes a diagonal matrix of size dim with coefficients set to zero */
+  EIGEN_DEVICE_FUNC static const ZeroInitializeReturnType Zero(Index size) {
+    return DiagonalVectorType::Zero(size).asDiagonal();
+  }
+  /** Initializes an identity matrix of size SizeAtCompileTime */
+  EIGEN_DEVICE_FUNC static const InitializeReturnType Identity() { return DiagonalVectorType::Ones().asDiagonal(); }
+  /** Initializes an identity matrix of size dim */
+  EIGEN_DEVICE_FUNC static const InitializeReturnType Identity(Index size) {
+    return DiagonalVectorType::Ones(size).asDiagonal();
+  }
+
+  /** Resizes to given size. */
+  EIGEN_DEVICE_FUNC inline void resize(Index size) { m_diagonal.resize(size); }
+  /** Sets all coefficients to zero. */
+  EIGEN_DEVICE_FUNC inline void setZero() { m_diagonal.setZero(); }
+  /** Resizes and sets all coefficients to zero. */
+  EIGEN_DEVICE_FUNC inline void setZero(Index size) { m_diagonal.setZero(size); }
+  /** Sets this matrix to be the identity matrix of the current size. */
+  EIGEN_DEVICE_FUNC inline void setIdentity() { m_diagonal.setOnes(); }
+  /** Sets this matrix to be the identity matrix of the given size. */
+  EIGEN_DEVICE_FUNC inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
+};
+
+/** \class DiagonalWrapper
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a diagonal matrix
+ *
+ * \tparam DiagonalVectorType_ the type of the vector of diagonal coefficients
+ *
+ * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients,
+ * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal()
+ * and most of the time this is the only way that it is used.
+ *
+ * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
+ */
+
+namespace internal {
+template <typename DiagonalVectorType_>
+struct traits<DiagonalWrapper<DiagonalVectorType_>> {
+  typedef DiagonalVectorType_ DiagonalVectorType;
+  typedef typename DiagonalVectorType::Scalar Scalar;
+  typedef typename DiagonalVectorType::StorageIndex StorageIndex;
+  typedef DiagonalShape StorageKind;
+  typedef typename traits<DiagonalVectorType>::XprKind XprKind;
+  enum {
+    RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+    MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+    Flags = (traits<DiagonalVectorType>::Flags & LvalueBit) | NoPreferredStorageOrderBit
+  };
+};
+}  // namespace internal
+
+template <typename DiagonalVectorType_>
+class DiagonalWrapper : public DiagonalBase<DiagonalWrapper<DiagonalVectorType_>>, internal::no_assignment_operator {
+ public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  typedef DiagonalVectorType_ DiagonalVectorType;
+  typedef DiagonalWrapper Nested;
+#endif
+
+  /** Constructor from expression of diagonal coefficients to wrap. */
+  EIGEN_DEVICE_FUNC explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
+
+  /** \returns a const reference to the wrapped expression of diagonal coefficients.
*/ + EIGEN_DEVICE_FUNC const DiagonalVectorType& diagonal() const { return m_diagonal; } + + protected: + typename DiagonalVectorType::Nested m_diagonal; +}; + +/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients + * + * \only_for_vectors + * + * Example: \include MatrixBase_asDiagonal.cpp + * Output: \verbinclude MatrixBase_asDiagonal.out + * + * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal() + **/ +template +EIGEN_DEVICE_FUNC inline const DiagonalWrapper MatrixBase::asDiagonal() const { + return DiagonalWrapper(derived()); +} + +/** \returns true if *this is approximately equal to a diagonal matrix, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isDiagonal.cpp + * Output: \verbinclude MatrixBase_isDiagonal.out + * + * \sa asDiagonal() + */ +template +bool MatrixBase::isDiagonal(const RealScalar& prec) const { + if (cols() != rows()) return false; + RealScalar maxAbsOnDiagonal = static_cast(-1); + for (Index j = 0; j < cols(); ++j) { + RealScalar absOnDiagonal = numext::abs(coeff(j, j)); + if (absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; + } + for (Index j = 0; j < cols(); ++j) + for (Index i = 0; i < j; ++i) { + if (!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; + if (!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; + } + return true; +} + +namespace internal { + +template <> +struct storage_kind_to_shape { + typedef DiagonalShape Shape; +}; + +struct Diagonal2Dense {}; + +template <> +struct AssignmentKind { + typedef Diagonal2Dense Kind; +}; + +// Diagonal matrix to Dense assignment +template +struct Assignment { + static void run(DstXprType& dst, const SrcXprType& src, + const internal::assign_op& /*func*/) { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols); + + dst.setZero(); + dst.diagonal() = src.diagonal(); + } + + static void run(DstXprType& dst, const SrcXprType& src, + const internal::add_assign_op& /*func*/) { + dst.diagonal() += src.diagonal(); + } + + static void run(DstXprType& dst, const SrcXprType& src, + const internal::sub_assign_op& /*func*/) { + dst.diagonal() -= src.diagonal(); + } +}; + +} // namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DIAGONALMATRIX_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DiagonalProduct.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DiagonalProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..bd0feeac7f09f9cb2bc6dc861c700a61948c6edb --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/DiagonalProduct.h @@ -0,0 +1,30 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2007-2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DIAGONALPRODUCT_H +#define EIGEN_DIAGONALPRODUCT_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. 
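+ *
+ * A minimal sketch (illustrative): right-multiplying by a diagonal scales the columns,
+ * \code
+ * Eigen::Matrix3d m = Eigen::Matrix3d::Ones();
+ * Eigen::Vector3d v(1.0, 2.0, 3.0);
+ * Eigen::Matrix3d colScaled = m * v.asDiagonal();  // column j scaled by v(j)
+ * \endcode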
+ */
+template <typename Derived>
+template <typename DiagonalDerived>
+EIGEN_DEVICE_FUNC inline const Product<Derived, DiagonalDerived, LazyProduct> MatrixBase<Derived>::operator*(
+    const DiagonalBase<DiagonalDerived>& a_diagonal) const {
+  return Product<Derived, DiagonalDerived, LazyProduct>(derived(), a_diagonal.derived());
+}
+
+}  // end namespace Eigen
+
+#endif  // EIGEN_DIAGONALPRODUCT_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Dot.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Dot.h
new file mode 100644
index 0000000000000000000000000000000000000000..059527c85f8c7c63a0bb6d6c5b4098303929b184
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Dot.h
@@ -0,0 +1,268 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008, 2010 Benoit Jacob
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DOT_H
+#define EIGEN_DOT_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Derived, typename Scalar = typename traits<Derived>::Scalar>
+struct squared_norm_impl {
+  using Real = typename NumTraits<Scalar>::Real;
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Real run(const Derived& a) {
+    Scalar result = a.unaryExpr(squared_norm_functor<Scalar>()).sum();
+    return numext::real(result) + numext::imag(result);
+  }
+};
+
+template <typename Derived>
+struct squared_norm_impl<Derived, bool> {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(const Derived& a) { return a.any(); }
+};
+
+}  // end namespace internal
+
+/** \fn MatrixBase::dot
+ * \returns the dot product of *this with other.
+ *
+ * \only_for_vectors
+ *
+ * \note If the scalar type is complex numbers, then this function returns the hermitian
+ * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
+ * second variable.
+ *
+ * \sa squaredNorm(), norm()
+ */
+template <typename Derived>
+template <typename OtherDerived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,
+                                  typename internal::traits<OtherDerived>::Scalar>::ReturnType
+    MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const {
+  return internal::dot_impl<Derived, OtherDerived>::run(derived(), other.derived());
+}
+
+//---------- implementation of L2 norm and related functions ----------
+
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm.
+ * In both cases, it is the sum of the squares of all the matrix entries.
+ * For vectors, this is also equal to the dot product of \c *this with itself.
+ *
+ * \sa dot(), norm(), lpNorm()
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::squaredNorm() const {
+  return internal::squared_norm_impl<Derived>::run(derived());
+}
+
+/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
+ * In both cases, it is the square root of the sum of the squares of all the matrix entries.
+ * For vectors, this is also equal to the square root of the dot product of \c *this with itself.
+ *
+ * \sa lpNorm(), dot(), squaredNorm()
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::norm() const {
+  return numext::sqrt(squaredNorm());
+}
+
+/** \returns an expression of the quotient of \c *this by its own norm.
+ *
+ * \warning If the input vector is too small (i.e., this->norm()==0),
+ * then this function returns a copy of the input.
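+ *
+ * A minimal sketch (illustrative):
+ * \code
+ * Eigen::Vector3d v(3.0, 0.0, 4.0);
+ * Eigen::Vector3d u = v.normalized();  // u = (0.6, 0.0, 0.8); v is left unchanged
+ * double n = v.norm();                 // 5.0 == sqrt(v.squaredNorm())
+ * \endcode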
+ *
+ * \only_for_vectors
+ *
+ * \sa norm(), normalize()
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::normalized()
+    const {
+  typedef typename internal::nested_eval<Derived, 2>::type Nested_;
+  Nested_ n(derived());
+  RealScalar z = n.squaredNorm();
+  // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
+  if (z > RealScalar(0))
+    return n / numext::sqrt(z);
+  else
+    return n;
+}
+
+/** Normalizes the vector, i.e. divides it by its own norm.
+ *
+ * \only_for_vectors
+ *
+ * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
+ *
+ * \sa norm(), normalized()
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize() {
+  RealScalar z = squaredNorm();
+  // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
+  if (z > RealScalar(0)) derived() /= numext::sqrt(z);
+}
+
+/** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow.
+ *
+ * \only_for_vectors
+ *
+ * This method is analogous to the normalized() method, but it reduces the risk of
+ * underflow and overflow when computing the norm.
+ *
+ * \warning If the input vector is too small (i.e., this->norm()==0),
+ * then this function returns a copy of the input.
+ *
+ * \sa stableNorm(), stableNormalize(), normalized()
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::stableNormalized() const {
+  typedef typename internal::nested_eval<Derived, 3>::type Nested_;
+  Nested_ n(derived());
+  RealScalar w = n.cwiseAbs().maxCoeff();
+  RealScalar z = (n / w).squaredNorm();
+  if (z > RealScalar(0))
+    return n / (numext::sqrt(z) * w);
+  else
+    return n;
+}
+
+/** Normalizes the vector while avoiding underflow and overflow.
+ *
+ * \only_for_vectors
+ *
+ * This method is analogous to the normalize() method, but it reduces the risk of
+ * underflow and overflow when computing the norm.
+ *
+ * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
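+ *
+ * A minimal sketch (illustrative) with double precision:
+ * \code
+ * Eigen::Vector2d v(3e200, 4e200);
+ * v.stableNormalize();  // v becomes (0.6, 0.8); plain normalize() would fail here,
+ *                       // since v.squaredNorm() overflows to infinity
+ * \endcode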
+ *
+ * \sa stableNorm(), stableNormalized(), normalize()
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize() {
+  RealScalar w = cwiseAbs().maxCoeff();
+  RealScalar z = (derived() / w).squaredNorm();
+  if (z > RealScalar(0)) derived() /= numext::sqrt(z) * w;
+}
+
+//---------- implementation of other norms ----------
+
+namespace internal {
+
+template <typename Derived, int p>
+struct lpNorm_selector {
+  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
+  EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) {
+    EIGEN_USING_STD(pow)
+    return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1) / p);
+  }
+};
+
+template <typename Derived>
+struct lpNorm_selector<Derived, 1> {
+  EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(
+      const MatrixBase<Derived>& m) {
+    return m.cwiseAbs().sum();
+  }
+};
+
+template <typename Derived>
+struct lpNorm_selector<Derived, 2> {
+  EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(
+      const MatrixBase<Derived>& m) {
+    return m.norm();
+  }
+};
+
+template <typename Derived>
+struct lpNorm_selector<Derived, Infinity> {
+  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
+  EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) {
+    if (Derived::SizeAtCompileTime == 0 || (Derived::SizeAtCompileTime == Dynamic && m.size() == 0))
+      return RealScalar(0);
+    return m.cwiseAbs().maxCoeff();
+  }
+};
+
+}  // end namespace internal
+
+/** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the
+ * p-th powers of the absolute values of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity,
+ * this function returns the \f$ \ell^\infty \f$ norm, that is the maximum of the absolute values of the coefficients
+ * of \c *this.
+ *
+ * In all cases, if \c *this is empty, then the value 0 is returned.
+ *
+ * \note For matrices, this function does not compute the operator-norm. That is, if \c *this is a matrix, then its
+ * coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm
+ * matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink.
+ *
+ * \sa norm()
+ */
+template <typename Derived>
+template <int p>
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+#else
+EIGEN_DEVICE_FUNC MatrixBase<Derived>::RealScalar
+#endif
+MatrixBase<Derived>::lpNorm() const {
+  return internal::lpNorm_selector<Derived, p>::run(*this);
+}
+
+//---------- implementation of isOrthogonal / isUnitary ----------
+
+/** \returns true if *this is approximately orthogonal to \a other,
+ * within the precision given by \a prec.
+ *
+ * Example: \include MatrixBase_isOrthogonal.cpp
+ * Output: \verbinclude MatrixBase_isOrthogonal.out
+ */
+template <typename Derived>
+template <typename OtherDerived>
+bool MatrixBase<Derived>::isOrthogonal(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const {
+  typename internal::nested_eval<Derived, 2>::type nested(derived());
+  typename internal::nested_eval<OtherDerived, 2>::type otherNested(other.derived());
+  return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+}
+
+/** \returns true if *this is approximately a unitary matrix,
+ * within the precision given by \a prec. In the case where the \a Scalar
+ * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name.
+ *
+ * \note This can be used to check whether a family of vectors forms an orthonormal basis.
+ * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
+ * orthonormal basis.
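+ *
+ * A minimal sketch (illustrative):
+ * \code
+ * Eigen::Matrix3d q = Eigen::Matrix3d::Identity();
+ * bool a = q.isUnitary();          // true: the columns are orthonormal
+ * bool b = (2.0 * q).isUnitary();  // false: each column has norm 2
+ * \endcode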
+ * + * Example: \include MatrixBase_isUnitary.cpp + * Output: \verbinclude MatrixBase_isUnitary.out + */ +template +bool MatrixBase::isUnitary(const RealScalar& prec) const { + typename internal::nested_eval::type self(derived()); + for (Index i = 0; i < cols(); ++i) { + if (!internal::isApprox(self.col(i).squaredNorm(), static_cast(1), prec)) return false; + for (Index j = 0; j < i; ++j) + if (!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast(1), prec)) return false; + } + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_DOT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/EigenBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/EigenBase.h new file mode 100644 index 0000000000000000000000000000000000000000..6d167006a094181fa3693b19f6b9daeb6f2afb79 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/EigenBase.h @@ -0,0 +1,149 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EIGENBASE_H +#define EIGEN_EIGENBASE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class EigenBase + * \ingroup Core_Module + * + * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). + * + * In other words, an EigenBase object is an object that can be copied into a MatrixBase. + * + * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc. + * + * Notice that this class is trivial, it is only used to disambiguate overloaded functions. + * + * \sa \blank \ref TopicClassHierarchy + */ +template +struct EigenBase { + // typedef typename internal::plain_matrix_type::type PlainObject; + + /** \brief The interface type of indices + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \sa StorageIndex, \ref TopicPreprocessorDirectives. + * DEPRECATED: Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead. + * Deprecation is not marked with a doxygen comment because there are too many existing usages to add the deprecation + * attribute. + */ + typedef Eigen::Index Index; + + // FIXME is it needed? + typedef typename internal::traits::StorageKind StorageKind; + + /** \returns a reference to the derived object */ + EIGEN_DEVICE_FUNC constexpr Derived& derived() { return *static_cast(this); } + /** \returns a const reference to the derived object */ + EIGEN_DEVICE_FUNC constexpr const Derived& derived() const { return *static_cast(this); } + + EIGEN_DEVICE_FUNC inline Derived& const_cast_derived() const { + return *static_cast(const_cast(this)); + } + EIGEN_DEVICE_FUNC inline const Derived& const_derived() const { return *static_cast(this); } + + /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return derived().rows(); } + /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return derived().cols(); } + /** \returns the number of coefficients, which is rows()*cols(). 
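+ * For example (illustrative), an expression of runtime size 4x3 reports:
+ * \code
+ * Eigen::MatrixXd m(4, 3);
+ * Eigen::Index n = m.size();  // 12 == m.rows() * m.cols()
+ * \endcode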
+ * \sa rows(), cols(), SizeAtCompileTime. */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index size() const EIGEN_NOEXCEPT { return rows() * cols(); } + + /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ + template + EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { + derived().evalTo(dst); + } + + /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */ + template + EIGEN_DEVICE_FUNC inline void addTo(Dest& dst) const { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + typename Dest::PlainObject res(rows(), cols()); + evalTo(res); + dst += res; + } + + /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */ + template + EIGEN_DEVICE_FUNC inline void subTo(Dest& dst) const { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + typename Dest::PlainObject res(rows(), cols()); + evalTo(res); + dst -= res; + } + + /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */ + template + EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + dst = dst * this->derived(); + } + + /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */ + template + EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + dst = this->derived() * dst; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper device(Device& device); + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper device(Device& device) const; +}; + +/*************************************************************************** + * Implementation of matrix base methods + ***************************************************************************/ + +/** \brief Copies the generic expression \a other into *this. + * + * \details The expression must provide a (templated) evalTo(Derived& dst) const + * function which does the actual job. In practice, this allows any user to write + * its own special matrix without having to modify MatrixBase + * + * \returns a reference to *this. + */ +template +template +EIGEN_DEVICE_FUNC Derived& DenseBase::operator=(const EigenBase& other) { + call_assignment(derived(), other.derived()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC Derived& DenseBase::operator+=(const EigenBase& other) { + call_assignment(derived(), other.derived(), internal::add_assign_op()); + return derived(); +} + +template +template +EIGEN_DEVICE_FUNC Derived& DenseBase::operator-=(const EigenBase& other) { + call_assignment(derived(), other.derived(), internal::sub_assign_op()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_EIGENBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Fill.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Fill.h new file mode 100644 index 0000000000000000000000000000000000000000..7a8115e8678bbaf7969d15f9333b11af790be5e5 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Fill.h @@ -0,0 +1,138 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2024 Charles Schlosser +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FILL_H +#define EIGEN_FILL_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct eigen_fill_helper : std::false_type {}; + +template +struct eigen_fill_helper> : std::true_type {}; + +template +struct eigen_fill_helper> : std::true_type {}; + +template +struct eigen_fill_helper> : eigen_fill_helper {}; + +template +struct eigen_fill_helper> + : std::integral_constant::value && + (Xpr::IsRowMajor ? (BlockRows == 1) : (BlockCols == 1))> {}; + +template +struct eigen_fill_helper>> : eigen_fill_helper {}; + +template +struct eigen_fill_helper>> + : std::integral_constant::value && + enum_eq_not_dynamic(OuterStride_, Xpr::InnerSizeAtCompileTime)> {}; + +template +struct eigen_fill_helper>> + : eigen_fill_helper>> {}; + +template +struct eigen_fill_helper>> + : eigen_fill_helper>> {}; + +template +struct eigen_fill_helper>> + : eigen_fill_helper>> {}; + +template +struct eigen_fill_impl { + using Scalar = typename Xpr::Scalar; + using Func = scalar_constant_op; + using PlainObject = typename Xpr::PlainObject; + using Constant = typename PlainObject::ConstantReturnType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const Scalar& val) { + const Constant src(dst.rows(), dst.cols(), val); + run(dst, src); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const SrcXpr& src) { + call_dense_assignment_loop(dst, src, assign_op()); + } +}; + +#if EIGEN_COMP_MSVC || defined(EIGEN_GPU_COMPILE_PHASE) +template +struct eigen_fill_impl : eigen_fill_impl {}; +#else +template +struct eigen_fill_impl { + using Scalar = typename Xpr::Scalar; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const Scalar& val) { + EIGEN_USING_STD(fill_n); + fill_n(dst.data(), dst.size(), val); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const SrcXpr& src) { + resize_if_allowed(dst, src, assign_op()); + const Scalar& val = src.functor()(); + run(dst, val); + } +}; +#endif + +template +struct eigen_memset_helper { + static constexpr bool value = std::is_trivial::value && eigen_fill_helper::value; +}; + +template +struct eigen_zero_impl { + using Scalar = typename Xpr::Scalar; + using PlainObject = typename Xpr::PlainObject; + using Zero = typename PlainObject::ZeroReturnType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst) { + const Zero src(dst.rows(), dst.cols()); + run(dst, src); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const SrcXpr& src) { + call_dense_assignment_loop(dst, src, assign_op()); + } +}; + +template +struct eigen_zero_impl { + using Scalar = typename Xpr::Scalar; + static constexpr size_t max_bytes = (std::numeric_limits::max)(); + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst) { + const size_t num_bytes = dst.size() * sizeof(Scalar); + if (num_bytes == 0) return; + void* dst_ptr = static_cast(dst.data()); +#ifndef EIGEN_NO_DEBUG + if (num_bytes > max_bytes) throw_std_bad_alloc(); + eigen_assert((dst_ptr != nullptr) && "null pointer dereference error!"); +#endif + EIGEN_USING_STD(memset); + memset(dst_ptr, 0, num_bytes); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, 
const SrcXpr& src) { + resize_if_allowed(dst, src, assign_op()); + run(dst); + } +}; + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_FILL_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ForceAlignedAccess.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ForceAlignedAccess.h new file mode 100644 index 0000000000000000000000000000000000000000..a91b0da6a35769c0de787cc16983ee12cbfe8bba --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ForceAlignedAccess.h @@ -0,0 +1,131 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FORCEALIGNEDACCESS_H +#define EIGEN_FORCEALIGNEDACCESS_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class ForceAlignedAccess + * \ingroup Core_Module + * + * \brief Enforce aligned packet loads and stores regardless of what is requested + * + * \param ExpressionType the type of the object of which we are forcing aligned packet access + * + * This class is the return type of MatrixBase::forceAlignedAccess() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::forceAlignedAccess() + */ + +namespace internal { +template +struct traits> : public traits {}; +} // namespace internal + +template +class ForceAlignedAccess : public internal::dense_xpr_base>::type { + public: + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess) + + EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { + return m_expression.outerStride(); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { + return m_expression.innerStride(); + } + + EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const { + return m_expression.coeff(row, col); + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } + + template + inline const PacketScalar packet(Index row, Index col) const { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const { + return m_expression.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) { + m_expression.const_cast_derived().template writePacket(index, x); + } + + EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } + + protected: + const ExpressionType& 
m_expression; + + private: + ForceAlignedAccess& operator=(const ForceAlignedAccess&); +}; + +/** \returns an expression of *this with forced aligned access + * \sa forceAlignedAccessIf(),class ForceAlignedAccess + */ +template +inline const ForceAlignedAccess MatrixBase::forceAlignedAccess() const { + return ForceAlignedAccess(derived()); +} + +/** \returns an expression of *this with forced aligned access + * \sa forceAlignedAccessIf(), class ForceAlignedAccess + */ +template +inline ForceAlignedAccess MatrixBase::forceAlignedAccess() { + return ForceAlignedAccess(derived()); +} + +/** \returns an expression of *this with forced aligned access if \a Enable is true. + * \sa forceAlignedAccess(), class ForceAlignedAccess + */ +template +template +inline add_const_on_value_type_t, Derived&>> +MatrixBase::forceAlignedAccessIf() const { + return derived(); // FIXME This should not work but apparently is never used +} + +/** \returns an expression of *this with forced aligned access if \a Enable is true. + * \sa forceAlignedAccess(), class ForceAlignedAccess + */ +template +template +inline std::conditional_t, Derived&> MatrixBase::forceAlignedAccessIf() { + return derived(); // FIXME This should not work but apparently is never used +} + +} // end namespace Eigen + +#endif // EIGEN_FORCEALIGNEDACCESS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Fuzzy.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Fuzzy.h new file mode 100644 index 0000000000000000000000000000000000000000..ed6b4ffead7488a977452bb719284f3a9948dd40 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Fuzzy.h @@ -0,0 +1,132 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
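+
+// A minimal usage sketch of the fuzzy comparisons defined below (illustrative only):
+//   Eigen::Vector3d a(1.0, 2.0, 3.0);
+//   Eigen::Vector3d b = a + Eigen::Vector3d::Constant(1e-14);
+//   a.isApprox(b);              // true: ||a - b|| <= prec * min(||a||, ||b||)
+//   a.isMuchSmallerThan(1e15);  // true: ||a|| <= prec * 1e15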
+ +#ifndef EIGEN_FUZZY_H +#define EIGEN_FUZZY_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template ::IsInteger> +struct isApprox_selector { + EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { + typename internal::nested_eval::type nested(x); + typename internal::nested_eval::type otherNested(y); + return (nested.matrix() - otherNested.matrix()).cwiseAbs2().sum() <= + prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); + } +}; + +template +struct isApprox_selector { + EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) { + return x.matrix() == y.matrix(); + } +}; + +template ::IsInteger> +struct isMuchSmallerThan_object_selector { + EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { + return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum(); + } +}; + +template +struct isMuchSmallerThan_object_selector { + EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +template ::IsInteger> +struct isMuchSmallerThan_scalar_selector { + EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar& y, + const typename Derived::RealScalar& prec) { + return x.cwiseAbs2().sum() <= numext::abs2(prec * y); + } +}; + +template +struct isMuchSmallerThan_scalar_selector { + EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar&, + const typename Derived::RealScalar&) { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +} // end namespace internal + +/** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ + * are considered to be approximately equal within precision \f$ p \f$ if + * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm + * L2 norm). + * + * \note Because of the multiplicativeness of this comparison, one can't use this function + * to check whether \c *this is approximately equal to the zero matrix or vector. + * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix + * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const + * RealScalar&, RealScalar) instead. + * + * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +EIGEN_DEVICE_FUNC bool DenseBase::isApprox(const DenseBase& other, + const RealScalar& prec) const { + return internal::isApprox_selector::run(derived(), other.derived(), prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] + * + * For matrices, the comparison is done using the Hilbert-Schmidt norm. 
For this reason, + * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm + * of a reference matrix of same dimensions. + * + * \sa isApprox(), isMuchSmallerThan(const DenseBase&, RealScalar) const + */ +template +EIGEN_DEVICE_FUNC bool DenseBase::isMuchSmallerThan(const typename NumTraits::Real& other, + const RealScalar& prec) const { + return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm. + * + * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +EIGEN_DEVICE_FUNC bool DenseBase::isMuchSmallerThan(const DenseBase& other, + const RealScalar& prec) const { + return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); +} + +} // end namespace Eigen + +#endif // EIGEN_FUZZY_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GeneralProduct.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GeneralProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..e4c51d2a6f6c1870c1a78f7894051b400372062f --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GeneralProduct.h @@ -0,0 +1,519 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERAL_PRODUCT_H +#define EIGEN_GENERAL_PRODUCT_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +enum { Large = 2, Small = 3 }; + +// Define the threshold value to fallback from the generic matrix-matrix product +// implementation (heavy) to the lightweight coeff-based product one. +// See generic_product_impl +// in products/GeneralMatrixMatrix.h for more details. +// TODO This threshold should also be used in the compile-time selector below. +#ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD +// This default value has been obtained on a Haswell architecture. +#define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20 +#endif + +namespace internal { + +template +struct product_type_selector; + +template +struct product_size_category { + enum { +#ifndef EIGEN_GPU_COMPILE_PHASE + is_large = MaxSize == Dynamic || Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD || + (Size == Dynamic && MaxSize >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD), +#else + is_large = 0, +#endif + value = is_large ? Large + : Size == 1 ? 
1 + : Small + }; +}; + +template +struct product_type { + typedef remove_all_t Lhs_; + typedef remove_all_t Rhs_; + enum { + MaxRows = traits::MaxRowsAtCompileTime, + Rows = traits::RowsAtCompileTime, + MaxCols = traits::MaxColsAtCompileTime, + Cols = traits::ColsAtCompileTime, + MaxDepth = min_size_prefer_fixed(traits::MaxColsAtCompileTime, traits::MaxRowsAtCompileTime), + Depth = min_size_prefer_fixed(traits::ColsAtCompileTime, traits::RowsAtCompileTime) + }; + + // the splitting into different lines of code here, introducing the _select enums and the typedef below, + // is to work around an internal compiler error with gcc 4.1 and 4.2. + private: + enum { + rows_select = product_size_category::value, + cols_select = product_size_category::value, + depth_select = product_size_category::value + }; + typedef product_type_selector selector; + + public: + enum { value = selector::ret, ret = selector::ret }; +#ifdef EIGEN_DEBUG_PRODUCT + static void debug() { + EIGEN_DEBUG_VAR(Rows); + EIGEN_DEBUG_VAR(Cols); + EIGEN_DEBUG_VAR(Depth); + EIGEN_DEBUG_VAR(rows_select); + EIGEN_DEBUG_VAR(cols_select); + EIGEN_DEBUG_VAR(depth_select); + EIGEN_DEBUG_VAR(value); + } +#endif +}; + +/* The following allows to select the kind of product at compile time + * based on the three dimensions of the product. + * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ +// FIXME I'm not sure the current mapping is the ideal one. +template +struct product_type_selector { + enum { ret = OuterProduct }; +}; +template +struct product_type_selector { + enum { ret = LazyCoeffBasedProductMode }; +}; +template +struct product_type_selector<1, N, 1> { + enum { ret = LazyCoeffBasedProductMode }; +}; +template +struct product_type_selector<1, 1, Depth> { + enum { ret = InnerProduct }; +}; +template <> +struct product_type_selector<1, 1, 1> { + enum { ret = InnerProduct }; +}; +template <> +struct product_type_selector { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector<1, Small, Small> { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = LazyCoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = LazyCoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = LazyCoeffBasedProductMode }; +}; +template <> +struct product_type_selector<1, Large, Small> { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector<1, Large, Large> { + enum { ret = GemvProduct }; +}; +template <> +struct product_type_selector<1, Small, Large> { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = GemvProduct }; +}; +template <> +struct product_type_selector { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = GemmProduct }; +}; +template <> +struct product_type_selector { + enum { ret = GemmProduct }; +}; +template <> +struct product_type_selector { + enum { ret = GemmProduct }; +}; +template <> +struct product_type_selector { + enum { ret = GemmProduct }; +}; +template <> +struct product_type_selector { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct product_type_selector { + enum { ret = CoeffBasedProductMode }; +}; +template <> +struct 
product_type_selector { + enum { ret = GemmProduct }; +}; + +} // end namespace internal + +/*********************************************************************** + * Implementation of Inner Vector Vector Product + ***********************************************************************/ + +// FIXME : maybe the "inner product" could return a Scalar +// instead of a 1x1 matrix ?? +// Pro: more natural for the user +// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix +// product ends up to a row-vector times col-vector product... To tackle this use +// case, we could have a specialization for Block with: operator=(Scalar x); + +/*********************************************************************** + * Implementation of Outer Vector Vector Product + ***********************************************************************/ + +/*********************************************************************** + * Implementation of General Matrix Vector Product + ***********************************************************************/ + +/* According to the shape/flags of the matrix we have to distinghish 3 different cases: + * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine + * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine + * 3 - all other cases are handled using a simple loop along the outer-storage direction. + * Therefore we need a lower level meta selector. + * Furthermore, if the matrix is the rhs, then the product has to be transposed. + */ +namespace internal { + +template +struct gemv_dense_selector; + +} // end namespace internal + +namespace internal { + +template +struct gemv_static_vector_if; + +template +struct gemv_static_vector_if { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Scalar* data() { + eigen_internal_assert(false && "should never be called"); + return 0; + } +}; + +template +struct gemv_static_vector_if { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Scalar* data() { return 0; } +}; + +template +struct gemv_static_vector_if { +#if EIGEN_MAX_STATIC_ALIGN_BYTES != 0 + internal::plain_array m_data; + EIGEN_STRONG_INLINE constexpr Scalar* data() { return m_data.array; } +#else + // Some architectures cannot align on the stack, + // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. + internal::plain_array m_data; + EIGEN_STRONG_INLINE constexpr Scalar* data() { + return reinterpret_cast((std::uintptr_t(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES - 1))) + + EIGEN_MAX_ALIGN_BYTES); + } +#endif +}; + +// The vector is on the left => transposition +template +struct gemv_dense_selector { + template + static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) { + Transpose destT(dest); + enum { OtherStorageOrder = StorageOrder == RowMajor ? 
ColMajor : RowMajor }; + gemv_dense_selector::run(rhs.transpose(), lhs.transpose(), destT, + alpha); + } +}; + +template <> +struct gemv_dense_selector { + template + static inline void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar ResScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + + typedef Map, plain_enum_min(AlignedMax, internal::packet_traits::size)> + MappedDest; + + ActualLhsType actualLhs = LhsBlasTraits::extract(lhs); + ActualRhsType actualRhs = RhsBlasTraits::extract(rhs); + + ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs); + + // make sure Dest is a compile-time vector type (bug 1166) + typedef std::conditional_t ActualDest; + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime == 1), + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime != 0) + }; + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + + if (!MightCannotUseDest) { + // shortcut if we are sure to be able to use dest directly, + // this ease the compiler to generate cleaner and more optimzized code for most common cases + general_matrix_vector_product::run(actualLhs.rows(), actualLhs.cols(), + LhsMapper(actualLhs.data(), + actualLhs.outerStride()), + RhsMapper(actualRhs.data(), + actualRhs.innerStride()), + dest.data(), 1, compatibleAlpha); + } else { + gemv_static_vector_if + static_dest; + + const bool alphaIsCompatible = (!ComplexByReal) || (numext::is_exactly_zero(numext::imag(actualAlpha))); + const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; + + ei_declare_aligned_stack_constructed_variable(ResScalar, actualDestPtr, dest.size(), + evalToDest ? 
dest.data() : static_dest.data()); + + if (!evalToDest) { +#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + constexpr int Size = Dest::SizeAtCompileTime; + Index size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN +#endif + if (!alphaIsCompatible) { + MappedDest(actualDestPtr, dest.size()).setZero(); + compatibleAlpha = RhsScalar(1); + } else + MappedDest(actualDestPtr, dest.size()) = dest; + } + + general_matrix_vector_product::run(actualLhs.rows(), actualLhs.cols(), + LhsMapper(actualLhs.data(), + actualLhs.outerStride()), + RhsMapper(actualRhs.data(), + actualRhs.innerStride()), + actualDestPtr, 1, compatibleAlpha); + + if (!evalToDest) { + if (!alphaIsCompatible) + dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size()); + else + dest = MappedDest(actualDestPtr, dest.size()); + } + } + } +}; + +template <> +struct gemv_dense_selector { + template + static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) { + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + typedef typename Dest::Scalar ResScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef internal::remove_all_t ActualRhsTypeCleaned; + + std::add_const_t actualLhs = LhsBlasTraits::extract(lhs); + std::add_const_t actualRhs = RhsBlasTraits::extract(rhs); + + ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + DirectlyUseRhs = + ActualRhsTypeCleaned::InnerStrideAtCompileTime == 1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime == 0 + }; + + gemv_static_vector_if + static_rhs; + + ei_declare_aligned_stack_constructed_variable( + RhsScalar, actualRhsPtr, actualRhs.size(), + DirectlyUseRhs ? const_cast(actualRhs.data()) : static_rhs.data()); + + if (!DirectlyUseRhs) { +#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + constexpr int Size = ActualRhsTypeCleaned::SizeAtCompileTime; + Index size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN +#endif + Map(actualRhsPtr, actualRhs.size()) = actualRhs; + } + + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + general_matrix_vector_product:: + run(actualLhs.rows(), actualLhs.cols(), LhsMapper(actualLhs.data(), actualLhs.outerStride()), + RhsMapper(actualRhsPtr, 1), dest.data(), + dest.col(0).innerStride(), // NOTE if dest is not a vector at compile-time, then dest.innerStride() might + // be wrong. 
(bug 1166) + actualAlpha); + } +}; + +template <> +struct gemv_dense_selector { + template + static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) { + EIGEN_STATIC_ASSERT((!nested_eval::Evaluate), + EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE); + // TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory, + // otherwise use a temp + typename nested_eval::type actual_rhs(rhs); + const Index size = rhs.rows(); + for (Index k = 0; k < size; ++k) dest += (alpha * actual_rhs.coeff(k)) * lhs.col(k); + } +}; + +template <> +struct gemv_dense_selector { + template + static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) { + EIGEN_STATIC_ASSERT((!nested_eval::Evaluate), + EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE); + typename nested_eval::type actual_rhs(rhs); + const Index rows = dest.rows(); + for (Index i = 0; i < rows; ++i) + dest.coeffRef(i) += alpha * (lhs.row(i).cwiseProduct(actual_rhs.transpose())).sum(); + } +}; + +} // end namespace internal + +/*************************************************************************** + * Implementation of matrix base methods + ***************************************************************************/ + +/** \returns the matrix product of \c *this and \a other. + * + * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*(). + * + * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*() + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product MatrixBase::operator*( + const MatrixBase& other) const { + // A note regarding the function declaration: In MSVC, this function will sometimes + // not be inlined since DenseStorage is an unwindable object for dynamic + // matrices and product types are holding a member to store the result. + // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. + enum { + ProductIsValid = Derived::ColsAtCompileTime == Dynamic || OtherDerived::RowsAtCompileTime == Dynamic || + int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived, OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT( + ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) +#ifdef EIGEN_DEBUG_PRODUCT + internal::product_type::debug(); +#endif + + return Product(derived(), other.derived()); +} + +/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. + * + * The returned product will behave like any other expressions: the coefficients of the product will be + * computed once at a time as requested. This might be useful in some extremely rare cases when only + * a small and no coherent fraction of the result's coefficients have to be computed. + * + * \warning This version of the matrix product can be much much slower. 
So use it only if you know + * what you are doing and that you measured a true speed improvement. + * + * \sa operator*(const MatrixBase&) + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product +MatrixBase::lazyProduct(const MatrixBase& other) const { + enum { + ProductIsValid = Derived::ColsAtCompileTime == Dynamic || OtherDerived::RowsAtCompileTime == Dynamic || + int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived, OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT( + ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + + return Product(derived(), other.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GenericPacketMath.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GenericPacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..26a4634d39ab00209cdbced299331e5412a365ec --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GenericPacketMath.h @@ -0,0 +1,1532 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERIC_PACKET_MATH_H +#define EIGEN_GENERIC_PACKET_MATH_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +/** \internal + * \file GenericPacketMath.h + * + * Default implementation for types not supported by the vectorization. + * In practice these functions are provided to make easier the writing + * of generic vectorized code. + */ + +#ifndef EIGEN_DEBUG_ALIGNED_LOAD +#define EIGEN_DEBUG_ALIGNED_LOAD +#endif + +#ifndef EIGEN_DEBUG_UNALIGNED_LOAD +#define EIGEN_DEBUG_UNALIGNED_LOAD +#endif + +#ifndef EIGEN_DEBUG_ALIGNED_STORE +#define EIGEN_DEBUG_ALIGNED_STORE +#endif + +#ifndef EIGEN_DEBUG_UNALIGNED_STORE +#define EIGEN_DEBUG_UNALIGNED_STORE +#endif + +struct default_packet_traits { + enum { + // Ops that are implemented for most types. + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 1, + HasSign = 1, + // By default, the nearest integer functions (rint, round, floor, ceil, trunc) are enabled for all scalar and packet + // types + HasRound = 1, + + HasArg = 0, + HasAbsDiff = 0, + HasBlend = 0, + // This flag is used to indicate whether packet comparison is supported. + // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true. 
+ HasCmp = 0, + + HasDiv = 0, + HasReciprocal = 0, + HasSqrt = 0, + HasRsqrt = 0, + HasExp = 0, + HasExpm1 = 0, + HasLog = 0, + HasLog1p = 0, + HasLog10 = 0, + HasPow = 0, + HasSin = 0, + HasCos = 0, + HasTan = 0, + HasASin = 0, + HasACos = 0, + HasATan = 0, + HasATanh = 0, + HasSinh = 0, + HasCosh = 0, + HasTanh = 0, + HasLGamma = 0, + HasDiGamma = 0, + HasZeta = 0, + HasPolygamma = 0, + HasErf = 0, + HasErfc = 0, + HasNdtri = 0, + HasBessel = 0, + HasIGamma = 0, + HasIGammaDerA = 0, + HasGammaSampleDerAlpha = 0, + HasIGammac = 0, + HasBetaInc = 0 + }; +}; + +template +struct packet_traits : default_packet_traits { + typedef T type; + typedef T half; + enum { + Vectorizable = 0, + size = 1, + AlignedOnScalar = 0, + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0 + }; +}; + +template +struct packet_traits : packet_traits {}; + +template +struct unpacket_traits { + typedef T type; + typedef T half; + typedef typename numext::get_integer_by_size::signed_type integer_packet; + enum { + size = 1, + alignment = alignof(T), + vectorizable = false, + masked_load_available = false, + masked_store_available = false + }; +}; + +template +struct unpacket_traits : unpacket_traits {}; + +/** \internal A convenience utility for determining if the type is a scalar. + * This is used to enable some generic packet implementations. + */ +template +struct is_scalar { + using Scalar = typename unpacket_traits::type; + enum { value = internal::is_same::value }; +}; + +// automatically and succinctly define combinations of pcast when +// 1) the packets are the same type, or +// 2) the packets differ only in sign. +// In both of these cases, preinterpret (bit_cast) is equivalent to pcast (static_cast) +template ::value && is_scalar::value> +struct is_degenerate_helper : is_same {}; +template <> +struct is_degenerate_helper : std::true_type {}; +template <> +struct is_degenerate_helper : std::true_type {}; +template <> +struct is_degenerate_helper : std::true_type {}; +template <> +struct is_degenerate_helper : std::true_type {}; + +template +struct is_degenerate_helper { + using SrcScalar = typename unpacket_traits::type; + static constexpr int SrcSize = unpacket_traits::size; + using TgtScalar = typename unpacket_traits::type; + static constexpr int TgtSize = unpacket_traits::size; + static constexpr bool value = is_degenerate_helper::value && (SrcSize == TgtSize); +}; + +// is_degenerate::value == is_degenerate::value +template +struct is_degenerate { + static constexpr bool value = + is_degenerate_helper::value || is_degenerate_helper::value; +}; + +template +struct is_half { + using Scalar = typename unpacket_traits::type; + static constexpr int Size = unpacket_traits::size; + using DefaultPacket = typename packet_traits::type; + static constexpr int DefaultSize = unpacket_traits::size; + static constexpr bool value = Size != 1 && Size < DefaultSize; +}; + +template +struct type_casting_traits { + enum { + VectorizedCast = + is_degenerate::value && packet_traits::Vectorizable && packet_traits::Vectorizable, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +// provides a succinct template to define vectorized casting traits with respect to the largest accessible packet types +template +struct vectorized_type_casting_traits { + enum : int { + DefaultSrcPacketSize = packet_traits::size, + DefaultTgtPacketSize = packet_traits::size, + VectorizedCast = 1, + SrcCoeffRatio = 
plain_enum_max(DefaultTgtPacketSize / DefaultSrcPacketSize, 1), + TgtCoeffRatio = plain_enum_max(DefaultSrcPacketSize / DefaultTgtPacketSize, 1) + }; +}; + +/** \internal Wrapper to ensure that multiple packet types can map to the same + same underlying vector type. */ +template +struct eigen_packet_wrapper { + EIGEN_ALWAYS_INLINE operator T&() { return m_val; } + EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; } + EIGEN_ALWAYS_INLINE eigen_packet_wrapper() = default; + EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T& v) : m_val(v) {} + EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T& v) { + m_val = v; + return *this; + } + + T m_val; +}; + +template ::value> +struct preinterpret_generic; + +template +struct preinterpret_generic { + // the packets are not the same, attempt scalar bit_cast + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Target run(const Packet& a) { + return numext::bit_cast(a); + } +}; + +template +struct preinterpret_generic { + // the packets are the same type: do nothing + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& a) { return a; } +}; + +/** \internal \returns reinterpret_cast(a) */ +template +EIGEN_DEVICE_FUNC inline Target preinterpret(const Packet& a) { + return preinterpret_generic::run(a); +} + +template ::value, + bool TgtIsHalf = is_half::value> +struct pcast_generic; + +template +struct pcast_generic { + // the packets are not degenerate: attempt scalar static_cast + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket run(const SrcPacket& a) { + return cast_impl::run(a); + } +}; + +template +struct pcast_generic { + // the packets are the same: do nothing + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& a) { return a; } +}; + +template +struct pcast_generic { + // the packets are degenerate: preinterpret is equivalent to pcast + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket run(const SrcPacket& a) { return preinterpret(a); } +}; + +/** \internal \returns static_cast(a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a) { + return pcast_generic::run(a); +} +template +EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& b) { + return pcast_generic::run(a, b); +} +template +EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& b, const SrcPacket& c, + const SrcPacket& d) { + return pcast_generic::run(a, b, c, d); +} +template +EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& b, const SrcPacket& c, const SrcPacket& d, + const SrcPacket& e, const SrcPacket& f, const SrcPacket& g, + const SrcPacket& h) { + return pcast_generic::run(a, b, c, d, e, f, g, h); +} + +template +struct pcast_generic { + // TgtPacket is a half packet of some other type + // perform cast and truncate result + using DefaultTgtPacket = typename is_half::DefaultPacket; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket run(const SrcPacket& a) { + return preinterpret(pcast(a)); + } +}; + +/** \internal \returns a + b (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet padd(const Packet& a, const Packet& b) { + return a + b; +} +// Avoid compiler warning for boolean algebra. +template <> +EIGEN_DEVICE_FUNC inline bool padd(const bool& a, const bool& b) { + return a || b; +} + +/** \internal \returns a packet version of \a *from, (un-aligned masked add) + * There is no generic implementation. We only have implementations for specialized + * cases. Generic case should not be called. 
+ */ +template +EIGEN_DEVICE_FUNC inline std::enable_if_t::masked_fpops_available, Packet> padd( + const Packet& a, const Packet& b, typename unpacket_traits::mask_t umask); + +/** \internal \returns a - b (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet psub(const Packet& a, const Packet& b) { + return a - b; +} + +/** \internal \returns -a (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pnegate(const Packet& a) { + EIGEN_STATIC_ASSERT((!is_same::type, bool>::value), + NEGATE IS NOT DEFINED FOR BOOLEAN TYPES) + return numext::negate(a); +} + +/** \internal \returns conj(a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pconj(const Packet& a) { + return numext::conj(a); +} + +/** \internal \returns a * b (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pmul(const Packet& a, const Packet& b) { + return a * b; +} +// Avoid compiler warning for boolean algebra. +template <> +EIGEN_DEVICE_FUNC inline bool pmul(const bool& a, const bool& b) { + return a && b; +} + +/** \internal \returns a / b (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pdiv(const Packet& a, const Packet& b) { + return a / b; +} + +// In the generic case, memset to all one bits. +template +struct ptrue_impl { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) { + Packet b; + memset(static_cast(&b), 0xff, sizeof(Packet)); + return b; + } +}; + +// For booleans, we can only directly set a valid `bool` value to avoid UB. +template <> +struct ptrue_impl { + static EIGEN_DEVICE_FUNC inline bool run(const bool& /*a*/) { return true; } +}; + +// For non-trivial scalars, set to Scalar(1) (i.e. a non-zero value). +// Although this is technically not a valid bitmask, the scalar path for pselect +// uses a comparison to zero, so this should still work in most cases. We don't +// have another option, since the scalar type requires initialization. +template +struct ptrue_impl::value && NumTraits::RequireInitialization>> { + static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) { return T(1); } +}; + +/** \internal \returns one bits. */ +template +EIGEN_DEVICE_FUNC inline Packet ptrue(const Packet& a) { + return ptrue_impl::run(a); +} + +// In the general case, memset to zero. +template +struct pzero_impl { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) { + Packet b; + memset(static_cast(&b), 0x00, sizeof(Packet)); + return b; + } +}; + +// For scalars, explicitly set to Scalar(0), since the underlying representation +// for zero may not consist of all-zero bits. +template +struct pzero_impl::value>> { + static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) { return T(0); } +}; + +/** \internal \returns packet of zeros */ +template +EIGEN_DEVICE_FUNC inline Packet pzero(const Packet& a) { + return pzero_impl::run(a); +} + +/** \internal \returns a <= b as a bit mask */ +template +EIGEN_DEVICE_FUNC inline Packet pcmp_le(const Packet& a, const Packet& b) { + return a <= b ? ptrue(a) : pzero(a); +} + +/** \internal \returns a < b as a bit mask */ +template +EIGEN_DEVICE_FUNC inline Packet pcmp_lt(const Packet& a, const Packet& b) { + return a < b ? ptrue(a) : pzero(a); +} + +/** \internal \returns a == b as a bit mask */ +template +EIGEN_DEVICE_FUNC inline Packet pcmp_eq(const Packet& a, const Packet& b) { + return a == b ? ptrue(a) : pzero(a); +} + +/** \internal \returns a < b or a==NaN or b==NaN as a bit mask */ +template +EIGEN_DEVICE_FUNC inline Packet pcmp_lt_or_nan(const Packet& a, const Packet& b) { + return a >= b ? 
pzero(a) : ptrue(a); +} + +template +struct bit_and { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a & b; } +}; + +template +struct bit_or { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a | b; } +}; + +template +struct bit_xor { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a ^ b; } +}; + +template +struct bit_not { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a) const { return ~a; } +}; + +template <> +struct bit_and { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a, const bool& b) const { + return a && b; + } +}; + +template <> +struct bit_or { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a, const bool& b) const { + return a || b; + } +}; + +template <> +struct bit_xor { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a, const bool& b) const { + return a != b; + } +}; + +template <> +struct bit_not { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a) const { return !a; } +}; + +// Use operators &, |, ^, ~. +template +struct operator_bitwise_helper { + EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { return bit_and()(a, b); } + EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return bit_or()(a, b); } + EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { return bit_xor()(a, b); } + EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return bit_not()(a); } +}; + +// Apply binary operations byte-by-byte +template +struct bytewise_bitwise_helper { + EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { + return binary(a, b, bit_and()); + } + EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return binary(a, b, bit_or()); } + EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { + return binary(a, b, bit_xor()); + } + EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return unary(a, bit_not()); } + + private: + template + EIGEN_DEVICE_FUNC static inline T unary(const T& a, Op op) { + const unsigned char* a_ptr = reinterpret_cast(&a); + T c; + unsigned char* c_ptr = reinterpret_cast(&c); + for (size_t i = 0; i < sizeof(T); ++i) { + *c_ptr++ = op(*a_ptr++); + } + return c; + } + + template + EIGEN_DEVICE_FUNC static inline T binary(const T& a, const T& b, Op op) { + const unsigned char* a_ptr = reinterpret_cast(&a); + const unsigned char* b_ptr = reinterpret_cast(&b); + T c; + unsigned char* c_ptr = reinterpret_cast(&c); + for (size_t i = 0; i < sizeof(T); ++i) { + *c_ptr++ = op(*a_ptr++, *b_ptr++); + } + return c; + } +}; + +// In the general case, use byte-by-byte manipulation. +template +struct bitwise_helper : public bytewise_bitwise_helper {}; + +// For integers or non-trivial scalars, use binary operators. 
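The byte-wise fallback above is what makes pand/por/pxor/pnot work for scalar types with no native bitwise operators (float, double, std::complex, ...): both operands are viewed as raw bytes and combined one byte at a time. Below is a minimal standalone sketch of the same idea, using std::memcpy rather than pointer casts to stay clear of aliasing pitfalls; `bytewise_xor` is an illustrative name, not an Eigen API. The specialization that follows then switches to the native `&`, `|`, `^`, `~` operators for integer and other suitable scalar types.

#include <cstddef>
#include <cstring>
#include <type_traits>

// Hypothetical analogue of bytewise_bitwise_helper::bitwise_xor.
template <typename T>
T bytewise_xor(const T& a, const T& b) {
  static_assert(std::is_trivially_copyable<T>::value, "byte-level manipulation needs trivially copyable types");
  unsigned char abuf[sizeof(T)], bbuf[sizeof(T)];
  std::memcpy(abuf, &a, sizeof(T));  // view both operands as raw bytes
  std::memcpy(bbuf, &b, sizeof(T));
  for (std::size_t i = 0; i < sizeof(T); ++i) abuf[i] ^= bbuf[i];  // combine byte by byte
  T c = a;                           // avoid requiring default construction
  std::memcpy(&c, abuf, sizeof(T));  // reassemble the result from the combined bytes
  return c;
}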
+template +struct bitwise_helper::value && + (NumTraits::IsInteger || NumTraits::RequireInitialization)>> + : public operator_bitwise_helper {}; + +/** \internal \returns the bitwise and of \a a and \a b */ +template +EIGEN_DEVICE_FUNC inline Packet pand(const Packet& a, const Packet& b) { + return bitwise_helper::bitwise_and(a, b); +} + +/** \internal \returns the bitwise or of \a a and \a b */ +template +EIGEN_DEVICE_FUNC inline Packet por(const Packet& a, const Packet& b) { + return bitwise_helper::bitwise_or(a, b); +} + +/** \internal \returns the bitwise xor of \a a and \a b */ +template +EIGEN_DEVICE_FUNC inline Packet pxor(const Packet& a, const Packet& b) { + return bitwise_helper::bitwise_xor(a, b); +} + +/** \internal \returns the bitwise not of \a a */ +template +EIGEN_DEVICE_FUNC inline Packet pnot(const Packet& a) { + return bitwise_helper::bitwise_not(a); +} + +/** \internal \returns the bitwise and of \a a and not \a b */ +template +EIGEN_DEVICE_FUNC inline Packet pandnot(const Packet& a, const Packet& b) { + return pand(a, pnot(b)); +} + +// In the general case, use bitwise select. +template +struct pselect_impl { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) { + return por(pand(a, mask), pandnot(b, mask)); + } +}; + +// For scalars, use ternary select. +template +struct pselect_impl::value>> { + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) { + return numext::equal_strict(mask, Packet(0)) ? b : a; + } +}; + +/** \internal \returns \a or \b for each field in packet according to \mask */ +template +EIGEN_DEVICE_FUNC inline Packet pselect(const Packet& mask, const Packet& a, const Packet& b) { + return pselect_impl::run(mask, a, b); +} + +template <> +EIGEN_DEVICE_FUNC inline bool pselect(const bool& cond, const bool& a, const bool& b) { + return cond ? a : b; +} + +/** \internal \returns the min or of \a a and \a b (coeff-wise) + If either \a a or \a b are NaN, the result is implementation defined. */ +template +struct pminmax_impl { + template + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { + return op(a, b); + } +}; + +/** \internal \returns the min or max of \a a and \a b (coeff-wise) + If either \a a or \a b are NaN, NaN is returned. */ +template <> +struct pminmax_impl { + template + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { + Packet not_nan_mask_a = pcmp_eq(a, a); + Packet not_nan_mask_b = pcmp_eq(b, b); + return pselect(not_nan_mask_a, pselect(not_nan_mask_b, op(a, b), b), a); + } +}; + +/** \internal \returns the min or max of \a a and \a b (coeff-wise) + If both \a a and \a b are NaN, NaN is returned. + Equivalent to std::fmin(a, b). */ +template <> +struct pminmax_impl { + template + static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { + Packet not_nan_mask_a = pcmp_eq(a, a); + Packet not_nan_mask_b = pcmp_eq(b, b); + return pselect(not_nan_mask_a, pselect(not_nan_mask_b, op(a, b), a), b); + } +}; + +#define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) [](const Type& a, const Type& b) { return Func(a, b); } + +/** \internal \returns the min of \a a and \a b (coeff-wise). + If \a a or \b b is NaN, the return value is implementation defined. */ +template +EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) { + return numext::mini(a, b); +} + +/** \internal \returns the min of \a a and \a b (coeff-wise). 
+ NaNPropagation determines the NaN propagation semantics. */ +template +EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) { + return pminmax_impl::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin))); +} + +/** \internal \returns the max of \a a and \a b (coeff-wise) + If \a a or \b b is NaN, the return value is implementation defined. */ +template +EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) { + return numext::maxi(a, b); +} + +/** \internal \returns the max of \a a and \a b (coeff-wise). + NaNPropagation determines the NaN propagation semantics. */ +template +EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) { + return pminmax_impl::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmax))); +} + +/** \internal \returns the absolute value of \a a */ +template +EIGEN_DEVICE_FUNC inline Packet pabs(const Packet& a) { + return numext::abs(a); +} +template <> +EIGEN_DEVICE_FUNC inline unsigned int pabs(const unsigned int& a) { + return a; +} +template <> +EIGEN_DEVICE_FUNC inline unsigned long pabs(const unsigned long& a) { + return a; +} +template <> +EIGEN_DEVICE_FUNC inline unsigned long long pabs(const unsigned long long& a) { + return a; +} + +/** \internal \returns the addsub value of \a a,b */ +template +EIGEN_DEVICE_FUNC inline Packet paddsub(const Packet& a, const Packet& b) { + return pselect(peven_mask(a), padd(a, b), psub(a, b)); +} + +/** \internal \returns the phase angle of \a a */ +template +EIGEN_DEVICE_FUNC inline Packet parg(const Packet& a) { + using numext::arg; + return arg(a); +} + +/** \internal \returns \a a arithmetically shifted by N bits to the right */ +template +EIGEN_DEVICE_FUNC inline T parithmetic_shift_right(const T& a) { + return numext::arithmetic_shift_right(a, N); +} + +/** \internal \returns \a a logically shifted by N bits to the right */ +template +EIGEN_DEVICE_FUNC inline T plogical_shift_right(const T& a) { + return numext::logical_shift_right(a, N); +} + +/** \internal \returns \a a shifted by N bits to the left */ +template +EIGEN_DEVICE_FUNC inline T plogical_shift_left(const T& a) { + return numext::logical_shift_left(a, N); +} + +/** \internal \returns the significant and exponent of the underlying floating point numbers + * See https://en.cppreference.com/w/cpp/numeric/math/frexp + */ +template +EIGEN_DEVICE_FUNC inline Packet pfrexp(const Packet& a, Packet& exponent) { + int exp; + EIGEN_USING_STD(frexp); + Packet result = static_cast(frexp(a, &exp)); + exponent = static_cast(exp); + return result; +} + +/** \internal \returns a * 2^((int)exponent) + * See https://en.cppreference.com/w/cpp/numeric/math/ldexp + */ +template +EIGEN_DEVICE_FUNC inline Packet pldexp(const Packet& a, const Packet& exponent) { + EIGEN_USING_STD(ldexp) + return static_cast(ldexp(a, static_cast(exponent))); +} + +/** \internal \returns the min of \a a and \a b (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pabsdiff(const Packet& a, const Packet& b) { + return pselect(pcmp_lt(a, b), psub(b, a), psub(a, b)); +} + +/** \internal \returns a packet version of \a *from, from must be properly aligned */ +template +EIGEN_DEVICE_FUNC inline Packet pload(const typename unpacket_traits::type* from) { + return *from; +} + +/** \internal \returns n elements of a packet version of \a *from, from must be properly aligned + * offset indicates the starting element in which to load and + * offset + n <= unpacket_traits::size + * All elements before offset and after the last element loaded will 
initialized with zero */ +template +EIGEN_DEVICE_FUNC inline Packet pload_partial(const typename unpacket_traits::type* from, const Index n, + const Index offset = 0) { + const Index packet_size = unpacket_traits::size; + eigen_assert(n + offset <= packet_size && "number of elements plus offset will read past end of packet"); + typedef typename unpacket_traits::type Scalar; + EIGEN_ALIGN_MAX Scalar elements[packet_size] = {Scalar(0)}; + for (Index i = offset; i < numext::mini(n + offset, packet_size); i++) { + elements[i] = from[i - offset]; + } + return pload(elements); +} + +/** \internal \returns a packet version of \a *from, (un-aligned load) */ +template +EIGEN_DEVICE_FUNC inline Packet ploadu(const typename unpacket_traits::type* from) { + return *from; +} + +/** \internal \returns n elements of a packet version of \a *from, (un-aligned load) + * All elements after the last element loaded will initialized with zero */ +template +EIGEN_DEVICE_FUNC inline Packet ploadu_partial(const typename unpacket_traits::type* from, const Index n, + const Index offset = 0) { + const Index packet_size = unpacket_traits::size; + eigen_assert(n + offset <= packet_size && "number of elements plus offset will read past end of packet"); + typedef typename unpacket_traits::type Scalar; + EIGEN_ALIGN_MAX Scalar elements[packet_size] = {Scalar(0)}; + for (Index i = offset; i < numext::mini(n + offset, packet_size); i++) { + elements[i] = from[i - offset]; + } + return pload(elements); +} + +/** \internal \returns a packet version of \a *from, (un-aligned masked load) + * There is no generic implementation. We only have implementations for specialized + * cases. Generic case should not be called. + */ +template +EIGEN_DEVICE_FUNC inline std::enable_if_t::masked_load_available, Packet> ploadu( + const typename unpacket_traits::type* from, typename unpacket_traits::mask_t umask); + +/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ +template +EIGEN_DEVICE_FUNC inline Packet pset1(const typename unpacket_traits::type& a) { + return a; +} + +/** \internal \returns a packet with constant coefficients set from bits */ +template +EIGEN_DEVICE_FUNC inline Packet pset1frombits(BitsType a); + +/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */ +template +EIGEN_DEVICE_FUNC inline Packet pload1(const typename unpacket_traits::type* a) { + return pset1(*a); +} + +/** \internal \returns a packet with elements of \a *from duplicated. + * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and + * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]} + * Currently, this function is only used for scalar * complex products. + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ploaddup(const typename unpacket_traits::type* from) { + return *from; +} + +/** \internal \returns a packet with elements of \a *from quadrupled. + * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and + * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]} + * Currently, this function is only used in matrix products. 
+ * For packet-size smaller or equal to 4, this function is equivalent to pload1 + */ +template +EIGEN_DEVICE_FUNC inline Packet ploadquad(const typename unpacket_traits::type* from) { + return pload1(from); +} + +/** \internal equivalent to + * \code + * a0 = pload1(a+0); + * a1 = pload1(a+1); + * a2 = pload1(a+2); + * a3 = pload1(a+3); + * \endcode + * \sa pset1, pload1, ploaddup, pbroadcast2 + */ +template +EIGEN_DEVICE_FUNC inline void pbroadcast4(const typename unpacket_traits::type* a, Packet& a0, Packet& a1, + Packet& a2, Packet& a3) { + a0 = pload1(a + 0); + a1 = pload1(a + 1); + a2 = pload1(a + 2); + a3 = pload1(a + 3); +} + +/** \internal equivalent to + * \code + * a0 = pload1(a+0); + * a1 = pload1(a+1); + * \endcode + * \sa pset1, pload1, ploaddup, pbroadcast4 + */ +template +EIGEN_DEVICE_FUNC inline void pbroadcast2(const typename unpacket_traits::type* a, Packet& a0, Packet& a1) { + a0 = pload1(a + 0); + a1 = pload1(a + 1); +} + +/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet plset(const typename unpacket_traits::type& a) { + return a; +} + +/** \internal \returns a packet with constant coefficients \a a, e.g.: (x, 0, x, 0), + where x is the value of all 1-bits. */ +template +EIGEN_DEVICE_FUNC inline Packet peven_mask(const Packet& /*a*/) { + typedef typename unpacket_traits::type Scalar; + const size_t n = unpacket_traits::size; + EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n]; + for (size_t i = 0; i < n; ++i) { + memset(elements + i, ((i & 1) == 0 ? 0xff : 0), sizeof(Scalar)); + } + return ploadu(elements); +} + +/** \internal copy the packet \a from to \a *to, \a to must be properly aligned */ +template +EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from) { + (*to) = from; +} + +/** \internal copy n elements of the packet \a from to \a *to, \a to must be properly aligned + * offset indicates the starting element in which to store and + * offset + n <= unpacket_traits::size */ +template +EIGEN_DEVICE_FUNC inline void pstore_partial(Scalar* to, const Packet& from, const Index n, const Index offset = 0) { + const Index packet_size = unpacket_traits::size; + eigen_assert(n + offset <= packet_size && "number of elements plus offset will write past end of packet"); + EIGEN_ALIGN_MAX Scalar elements[packet_size]; + pstore(elements, from); + for (Index i = 0; i < numext::mini(n, packet_size - offset); i++) { + to[i] = elements[i + offset]; + } +} + +/** \internal copy the packet \a from to \a *to, (un-aligned store) */ +template +EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from) { + (*to) = from; +} + +/** \internal copy n elements of the packet \a from to \a *to, (un-aligned store) */ +template +EIGEN_DEVICE_FUNC inline void pstoreu_partial(Scalar* to, const Packet& from, const Index n, const Index offset = 0) { + const Index packet_size = unpacket_traits::size; + eigen_assert(n + offset <= packet_size && "number of elements plus offset will write past end of packet"); + EIGEN_ALIGN_MAX Scalar elements[packet_size]; + pstore(elements, from); + for (Index i = 0; i < numext::mini(n, packet_size - offset); i++) { + to[i] = elements[i + offset]; + } +} + +/** \internal copy the packet \a from to \a *to, (un-aligned store with a mask) + * There is no generic implementation. We only have implementations for specialized + * cases. Generic case should not be called. 
+ */ +template +EIGEN_DEVICE_FUNC inline std::enable_if_t::masked_store_available, void> pstoreu( + Scalar* to, const Packet& from, typename unpacket_traits::mask_t umask); + +template +EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/) { + return ploadu(from); +} + +template +EIGEN_DEVICE_FUNC inline Packet pgather_partial(const Scalar* from, Index stride, const Index n) { + const Index packet_size = unpacket_traits::size; + EIGEN_ALIGN_MAX Scalar elements[packet_size] = {Scalar(0)}; + for (Index i = 0; i < numext::mini(n, packet_size); i++) { + elements[i] = from[i * stride]; + } + return pload(elements); +} + +template +EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/) { + pstore(to, from); +} + +template +EIGEN_DEVICE_FUNC inline void pscatter_partial(Scalar* to, const Packet& from, Index stride, const Index n) { + const Index packet_size = unpacket_traits::size; + EIGEN_ALIGN_MAX Scalar elements[packet_size]; + pstore(elements, from); + for (Index i = 0; i < numext::mini(n, packet_size); i++) { + to[i * stride] = elements[i]; + } +} + +/** \internal tries to do cache prefetching of \a addr */ +template +EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr) { +#if defined(EIGEN_HIP_DEVICE_COMPILE) + // do nothing +#elif defined(EIGEN_CUDA_ARCH) +#if defined(__LP64__) || EIGEN_OS_WIN64 + // 64-bit pointer operand constraint for inlined asm + asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr)); +#else + // 32-bit pointer operand constraint for inlined asm + asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr)); +#endif +#elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC) + __builtin_prefetch(addr); +#endif +} + +/** \internal \returns the reversed elements of \a a*/ +template +EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a) { + return a; +} + +/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ +template +EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a) { + return Packet(numext::imag(a), numext::real(a)); +} + +/************************** + * Special math functions + ***************************/ + +/** \internal \returns isnan(a) */ +template +EIGEN_DEVICE_FUNC inline Packet pisnan(const Packet& a) { + return pandnot(ptrue(a), pcmp_eq(a, a)); +} + +/** \internal \returns isinf(a) */ +template +EIGEN_DEVICE_FUNC inline Packet pisinf(const Packet& a) { + using Scalar = typename unpacket_traits::type; + constexpr Scalar inf = NumTraits::infinity(); + return pcmp_eq(pabs(a), pset1(inf)); +} + +/** \internal \returns the sine of \a a (coeff-wise) */ +template +EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psin(const Packet& a) { + EIGEN_USING_STD(sin); + return sin(a); +} + +/** \internal \returns the cosine of \a a (coeff-wise) */ +template +EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcos(const Packet& a) { + EIGEN_USING_STD(cos); + return cos(a); +} + +/** \internal \returns the tan of \a a (coeff-wise) */ +template +EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptan(const Packet& a) { + EIGEN_USING_STD(tan); + return tan(a); +} + +/** \internal \returns the arc sine of \a a (coeff-wise) */ +template +EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pasin(const Packet& a) { + EIGEN_USING_STD(asin); + return asin(a); +} + +/** \internal \returns the arc cosine of \a a (coeff-wise) */ +template +EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pacos(const Packet& a) 
{
+  EIGEN_USING_STD(acos);
+  return acos(a);
+}
+
+/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psinh(const Packet& a) {
+  EIGEN_USING_STD(sinh);
+  return sinh(a);
+}
+
+/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcosh(const Packet& a) {
+  EIGEN_USING_STD(cosh);
+  return cosh(a);
+}
+
+/** \internal \returns the arc tangent of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patan(const Packet& a) {
+  EIGEN_USING_STD(atan);
+  return atan(a);
+}
+
+/** \internal \returns the hyperbolic tangent of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptanh(const Packet& a) {
+  EIGEN_USING_STD(tanh);
+  return tanh(a);
+}
+
+/** \internal \returns the inverse hyperbolic tangent of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patanh(const Packet& a) {
+  EIGEN_USING_STD(atanh);
+  return atanh(a);
+}
+
+/** \internal \returns the exp of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp(const Packet& a) {
+  return numext::exp(a);
+}
+
+/** \internal \returns the exp2 of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp2(const Packet& a) {
+  return numext::exp2(a);
+}
+
+/** \internal \returns the expm1 of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexpm1(const Packet& a) {
+  return numext::expm1(a);
+}
+
+/** \internal \returns the log of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog(const Packet& a) {
+  EIGEN_USING_STD(log);
+  return log(a);
+}
+
+/** \internal \returns the log1p of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog1p(const Packet& a) {
+  return numext::log1p(a);
+}
+
+/** \internal \returns the log10 of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog10(const Packet& a) {
+  EIGEN_USING_STD(log10);
+  return log10(a);
+}
+
+/** \internal \returns the log2 of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog2(const Packet& a) {
+  using Scalar = typename internal::unpacket_traits<Packet>::type;
+  using RealScalar = typename NumTraits<Scalar>::Real;
+  return pmul(pset1<Packet>(Scalar(RealScalar(EIGEN_LOG2E))), plog(a));
+}
+
+/** \internal \returns the square-root of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt(const Packet& a) {
+  return numext::sqrt(a);
+}
+
+/** \internal \returns the cube-root of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcbrt(const Packet& a) {
+  return numext::cbrt(a);
+}
+
+template <typename Packet, bool IsScalar = is_scalar<Packet>::value,
+          bool IsInteger = NumTraits<typename unpacket_traits<Packet>::type>::IsInteger>
+struct nearest_integer_packetop_impl {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_floor(const Packet& x) { return numext::floor(x); }
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_ceil(const Packet& x) { return numext::ceil(x); }
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_rint(const Packet& x) { return numext::rint(x); }
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_round(const Packet& x) { return numext::round(x); }
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_trunc(const Packet& x) { return numext::trunc(x); }
+};
+
+/** \internal \returns the rounded value of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pround(const Packet& a) {
+  return nearest_integer_packetop_impl<Packet>::run_round(a);
+}
+
+/** \internal \returns the floor of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pfloor(const Packet& a) {
+  return nearest_integer_packetop_impl<Packet>::run_floor(a);
+}
+
+/** \internal \returns the rounded value of \a a (coeff-wise) with current rounding mode */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet print(const Packet& a) {
+  return nearest_integer_packetop_impl<Packet>::run_rint(a);
+}
+
+/** \internal \returns the ceil of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pceil(const Packet& a) {
+  return nearest_integer_packetop_impl<Packet>::run_ceil(a);
+}
+
+/** \internal \returns the truncation of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ptrunc(const Packet& a) {
+  return nearest_integer_packetop_impl<Packet>::run_trunc(a);
+}
+
+template <typename Packet, typename EnableIf = void>
+struct psign_impl {
+  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a) { return numext::sign(a); }
+};
+
+/** \internal \returns the sign of \a a (coeff-wise) */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline Packet psign(const Packet& a) {
+  return psign_impl<Packet>::run(a);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline bool psign(const bool& a) {
+  return a;
+}
+
+/** \internal \returns the first element of a packet */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type pfirst(const Packet& a) {
+  return a;
+}
+
+/** \internal \returns the sums of the elements of the upper and lower halves of \a a if \a a is larger than 4.
+ * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}.
+ * For packet sizes smaller than or equal to 4, this boils down to a no-op.
+ */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline std::conditional_t<(unpacket_traits<Packet>::size % 8) == 0,
+                                            typename unpacket_traits<Packet>::half, Packet>
+predux_half_dowto4(const Packet& a) {
+  return a;
+}
+
+// Slow generic implementation of Packet reduction.
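predux_half_dowto4 above only folds the upper half of a packet into its lower half; the full horizontal reductions are built on the predux_helper that follows, which spills the packet to a scalar array and folds it pairwise in log2(n) passes. A standalone sketch of that access pattern, assuming a power-of-two element count; `tree_reduce` is a hypothetical name, not an Eigen API.

#include <cstddef>

template <typename Scalar, std::size_t N, typename Op>
Scalar tree_reduce(Scalar (&elements)[N], Op op) {
  static_assert(N != 0 && (N & (N - 1)) == 0, "sketch assumes a power-of-two length");
  // Each pass combines element i with element i+k, halving the live range,
  // exactly like the loop in predux_helper below. Note: this sketch folds in
  // place; predux_helper first spills the packet into a local array.
  for (std::size_t k = N / 2; k > 0; k /= 2)
    for (std::size_t i = 0; i < k; ++i) elements[i] = op(elements[i], elements[i + k]);
  return elements[0];
}

// Usage: float data[4] = {1.f, 2.f, 3.f, 4.f};
//        float sum = tree_reduce(data, [](float x, float y) { return x + y; });  // 10.f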
+template <typename Packet, typename Op>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_helper(const Packet& a, Op op) {
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  const size_t n = unpacket_traits<Packet>::size;
+  EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n];
+  pstoreu<Scalar>(elements, a);
+  for (size_t k = n / 2; k > 0; k /= 2) {
+    for (size_t i = 0; i < k; ++i) {
+      elements[i] = op(elements[i], elements[i + k]);
+    }
+  }
+  return elements[0];
+}
+
+/** \internal \returns the sum of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a) {
+  return a;
+}
+
+/** \internal \returns the product of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a) {
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmul<Scalar>)));
+}
+
+/** \internal \returns the min of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<PropagateFast, Scalar>)));
+}
+
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<NaNPropagation, Scalar>)));
+}
+
+/** \internal \returns the max of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) {
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<PropagateFast, Scalar>)));
+}
+
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) {
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<NaNPropagation, Scalar>)));
+}
+
+#undef EIGEN_BINARY_OP_NAN_PROPAGATION
+
+/** \internal \returns true if all coeffs of \a a mean "true"
+ * It is supposed to be called on values returned by pcmp_*.
+ */
+// not needed yet
+// template <typename Packet> EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a)
+// { return bool(a); }
+
+/** \internal \returns true if any coeff of \a a means "true"
+ * It is supposed to be called on values returned by pcmp_*.
+ */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline bool predux_any(const Packet& a) {
+  // Dirty but generic implementation where "true" is assumed to be non-zero and all the same.
+  // It is expected that "true" is either:
+  //  - Scalar(1),
+  //  - bits full of ones (NaN for floats),
+  //  - or the first bit equal to 1 (1 for ints, the smallest denormal for floats).
+  // For all these cases, taking the sum is just fine, and this boils down to a no-op for scalars.
+  typedef typename unpacket_traits<Packet>::type Scalar;
+  return numext::not_equal_strict(predux(a), Scalar(0));
+}
+
+/***************************************************************************
+ * The following functions might not have to be overwritten for vectorized types
+ ***************************************************************************/
+
+// FMA instructions.
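The comment inside predux_any is worth spelling out: every "true" that pcmp_* can produce is non-zero under all three encodings (Scalar(1), all-one bits, or a set low bit), so a plain horizontal sum is enough to distinguish "at least one lane true" from "all lanes false", with no movemask-style primitive required. A scalar model of that reasoning over a hypothetical 4-lane integer mask (`any_true` is not an Eigen API); the FMA wrappers below then compose pmadd and friends from padd/pmul so that targets without fused instructions still get a correct, if unfused, fallback.

#include <cstdint>

bool any_true(const std::uint32_t (&mask)[4]) {
  std::uint32_t acc = 0;
  for (std::uint32_t m : mask) acc += m;  // mirrors predux(a), the horizontal sum
  return acc != 0;                        // mirrors numext::not_equal_strict(predux(a), Scalar(0))
}

// e.g. a mask {0, 0, 0xFFFFFFFF, 0} sums to a non-zero value -> true;
//      an all-zero mask sums to zero -> false.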
+/** \internal \returns a * b + c (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pmadd(const Packet& a, const Packet& b, const Packet& c) { + return padd(pmul(a, b), c); +} + +/** \internal \returns a * b - c (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pmsub(const Packet& a, const Packet& b, const Packet& c) { + return psub(pmul(a, b), c); +} + +/** \internal \returns -(a * b) + c (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pnmadd(const Packet& a, const Packet& b, const Packet& c) { + return psub(c, pmul(a, b)); +} + +/** \internal \returns -((a * b + c) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet pnmsub(const Packet& a, const Packet& b, const Packet& c) { + return pnegate(pmadd(a, b, c)); +} + +/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned + */ +// NOTE: this function must really be templated on the packet type (think about different packet types for the same +// scalar type) +template +inline void pstore1(typename unpacket_traits::type* to, const typename unpacket_traits::type& a) { + pstore(to, pset1(a)); +} + +/** \internal \returns a packet version of \a *from. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits::type* from) { + if (Alignment >= unpacket_traits::alignment) + return pload(from); + else + return ploadu(from); +} + +/** \internal \returns n elements of a packet version of \a *from. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_partial(const typename unpacket_traits::type* from, + const Index n, const Index offset = 0) { + if (Alignment >= unpacket_traits::alignment) + return pload_partial(from, n, offset); + else + return ploadu_partial(from, n, offset); +} + +/** \internal copy the packet \a from to \a *to. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from) { + if (Alignment >= unpacket_traits::alignment) + pstore(to, from); + else + pstoreu(to, from); +} + +/** \internal copy n elements of the packet \a from to \a *to. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret_partial(Scalar* to, const Packet& from, const Index n, + const Index offset = 0) { + if (Alignment >= unpacket_traits::alignment) + pstore_partial(to, from, n, offset); + else + pstoreu_partial(to, from, n, offset); +} + +/** \internal \returns a packet version of \a *from. + * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the + * hardware if available to speedup the loading of data that won't be modified + * by the current computation. + */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits::type* from) { + return ploadt(from); +} + +/*************************************************************************** + * Fast complex products (GCC generates a function call which is very slow) + ***************************************************************************/ + +// Eigen+CUDA does not support complexes. 
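Note that ploadt/pstoret above resolve the aligned-versus-unaligned choice entirely at compile time: `Alignment >= unpacket_traits<Packet>::alignment` is a constant expression, so the untaken branch costs nothing at run time. Below is a hypothetical SSE-flavoured sketch of the same dispatch (using C++17 `if constexpr` for brevity, where Eigen itself keeps a plain `if` on a constant); the #if block that follows then supplies the fast complex products announced in the banner above.

#include <immintrin.h>

template <int Alignment>
__m128 load_dispatch(const float* p) {
  if constexpr (Alignment >= 16)
    return _mm_load_ps(p);   // aligned load, the pload-style path
  else
    return _mm_loadu_ps(p);  // unaligned load, the ploadu-style path
}

// load_dispatch<16>(p) assumes p is 16-byte aligned; load_dispatch<1>(p) makes no such assumption.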
+#if !defined(EIGEN_GPUCC) + +template <> +inline std::complex pmul(const std::complex& a, const std::complex& b) { + return std::complex(a.real() * b.real() - a.imag() * b.imag(), a.imag() * b.real() + a.real() * b.imag()); +} + +template <> +inline std::complex pmul(const std::complex& a, const std::complex& b) { + return std::complex(a.real() * b.real() - a.imag() * b.imag(), a.imag() * b.real() + a.real() * b.imag()); +} + +#endif + +/*************************************************************************** + * PacketBlock, that is a collection of N packets where the number of words + * in the packet is a multiple of N. + ***************************************************************************/ +template ::size> +struct PacketBlock { + Packet packet[N]; +}; + +template +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& /*kernel*/) { + // Nothing to do in the scalar case, i.e. a 1x1 matrix. +} + +/*************************************************************************** + * Selector, i.e. vector of N boolean values used to select (i.e. blend) + * words from 2 packets. + ***************************************************************************/ +template +struct Selector { + bool select[N]; +}; + +template +EIGEN_DEVICE_FUNC inline Packet pblend(const Selector::size>& ifPacket, + const Packet& thenPacket, const Packet& elsePacket) { + return ifPacket.select[0] ? thenPacket : elsePacket; +} + +/** \internal \returns 1 / a (coeff-wise) */ +template +EIGEN_DEVICE_FUNC inline Packet preciprocal(const Packet& a) { + using Scalar = typename unpacket_traits::type; + return pdiv(pset1(Scalar(1)), a); +} + +/** \internal \returns the reciprocal square-root of \a a (coeff-wise) */ +template +EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet prsqrt(const Packet& a) { + return preciprocal(psqrt(a)); +} + +template ::value, + bool IsInteger = NumTraits::type>::IsInteger> +struct psignbit_impl; +template +struct psignbit_impl { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Packet run(const Packet& a) { return numext::signbit(a); } +}; +template +struct psignbit_impl { + // generic implementation if not specialized in PacketMath.h + // slower than arithmetic shift + typedef typename unpacket_traits::type Scalar; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static Packet run(const Packet& a) { + const Packet cst_pos_one = pset1(Scalar(1)); + const Packet cst_neg_one = pset1(Scalar(-1)); + return pcmp_eq(por(pand(a, cst_neg_one), cst_pos_one), cst_neg_one); + } +}; +template +struct psignbit_impl { + // generic implementation for integer packets + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Packet run(const Packet& a) { return pcmp_lt(a, pzero(a)); } +}; +/** \internal \returns the sign bit of \a a as a bitmask*/ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE constexpr Packet psignbit(const Packet& a) { + return psignbit_impl::run(a); +} + +/** \internal \returns the 2-argument arc tangent of \a y and \a x (coeff-wise) */ +template ::value, int> = 0> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet patan2(const Packet& y, const Packet& x) { + return numext::atan2(y, x); +} + +/** \internal \returns the 2-argument arc tangent of \a y and \a x (coeff-wise) */ +template ::value, int> = 0> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet patan2(const Packet& y, const Packet& x) { + typedef typename internal::unpacket_traits::type Scalar; + + // See https://en.cppreference.com/w/cpp/numeric/math/atan2 + // for how corner cases are supposed to be handled according to the + 
// IEEE floating-point standard (IEC 60559). + const Packet kSignMask = pset1(-Scalar(0)); + const Packet kZero = pzero(x); + const Packet kOne = pset1(Scalar(1)); + const Packet kPi = pset1(Scalar(EIGEN_PI)); + + const Packet x_has_signbit = psignbit(x); + const Packet y_signmask = pand(y, kSignMask); + const Packet x_signmask = pand(x, kSignMask); + const Packet result_signmask = pxor(y_signmask, x_signmask); + const Packet shift = por(pand(x_has_signbit, kPi), y_signmask); + + const Packet x_and_y_are_same = pcmp_eq(pabs(x), pabs(y)); + const Packet x_and_y_are_zero = pcmp_eq(por(x, y), kZero); + + Packet arg = pdiv(y, x); + arg = pselect(x_and_y_are_same, por(kOne, result_signmask), arg); + arg = pselect(x_and_y_are_zero, result_signmask, arg); + + Packet result = patan(arg); + result = padd(result, shift); + return result; +} + +/** \internal \returns the argument of \a a as a complex number */ +template ::value, int> = 0> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet pcarg(const Packet& a) { + return Packet(numext::arg(a)); +} + +/** \internal \returns the argument of \a a as a complex number */ +template ::value, int> = 0> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet pcarg(const Packet& a) { + EIGEN_STATIC_ASSERT(NumTraits::type>::IsComplex, + THIS METHOD IS FOR COMPLEX TYPES ONLY) + using RealPacket = typename unpacket_traits::as_real; + // a // r i r i ... + RealPacket aflip = pcplxflip(a).v; // i r i r ... + RealPacket result = patan2(aflip, a.v); // atan2 crap atan2 crap ... + return (Packet)pand(result, peven_mask(result)); // atan2 0 atan2 0 ... +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERIC_PACKET_MATH_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GlobalFunctions.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GlobalFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..df1098e27e6555a921b562ccfd31c721e140152c --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/GlobalFunctions.h @@ -0,0 +1,230 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010-2016 Gael Guennebaud +// Copyright (C) 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_GLOBAL_FUNCTIONS_H +#define EIGEN_GLOBAL_FUNCTIONS_H + +#ifdef EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME, FUNCTOR, DOC_OP, DOC_DETAILS) \ + /** \returns an expression of the coefficient-wise DOC_OP of \a x \ + \ \ + DOC_DETAILS \ + \ \ + \sa Math functions, class CwiseUnaryOp \ + */ \ + template \ + inline const Eigen::CwiseUnaryOp, const Derived> NAME( \ + const Eigen::ArrayBase& x); + +#else + +#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME, FUNCTOR, DOC_OP, DOC_DETAILS) \ + template \ + inline const Eigen::CwiseUnaryOp, const Derived>(NAME)( \ + const Eigen::ArrayBase& x) { \ + return Eigen::CwiseUnaryOp, const Derived>(x.derived()); \ + } + +#endif // EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME, FUNCTOR) \ + \ + template \ + struct NAME##_retval > { \ + typedef const Eigen::CwiseUnaryOp, const Derived> type; \ + }; \ + template \ + struct NAME##_impl > { \ + static inline typename NAME##_retval >::type run(const Eigen::ArrayBase& x) { \ + return typename NAME##_retval >::type(x.derived()); \ + } \ + }; + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real, scalar_real_op, real part,\sa ArrayBase::real) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag, scalar_imag_op, imaginary part,\sa ArrayBase::imag) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj, scalar_conjugate_op, complex conjugate,\sa ArrayBase::conjugate) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse, scalar_inverse_op, inverse,\sa ArrayBase::inverse) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin, scalar_sin_op, sine,\sa ArrayBase::sin) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos, scalar_cos_op, cosine,\sa ArrayBase::cos) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan, scalar_tan_op, tangent,\sa ArrayBase::tan) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan, scalar_atan_op, arc - tangent,\sa ArrayBase::atan) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin, scalar_asin_op, arc - sine,\sa ArrayBase::asin) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos, scalar_acos_op, arc - consine,\sa ArrayBase::acos) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh, scalar_sinh_op, hyperbolic sine,\sa ArrayBase::sinh) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh, scalar_cosh_op, hyperbolic cosine,\sa ArrayBase::cosh) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh, scalar_tanh_op, hyperbolic tangent,\sa ArrayBase::tanh) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh, scalar_asinh_op, inverse hyperbolic sine,\sa ArrayBase::asinh) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh, scalar_acosh_op, inverse hyperbolic cosine,\sa ArrayBase::acosh) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh, scalar_atanh_op, inverse hyperbolic tangent,\sa ArrayBase::atanh) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic, scalar_logistic_op, logistic function,\sa ArrayBase::logistic) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma, scalar_lgamma_op, + natural logarithm of the gamma function,\sa ArrayBase::lgamma) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma, scalar_digamma_op, derivative of lgamma,\sa ArrayBase::digamma) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf, scalar_erf_op, error function,\sa ArrayBase::erf) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc, scalar_erfc_op, complement error function,\sa ArrayBase::erfc) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ndtri, scalar_ndtri_op, inverse normal distribution function,\sa ArrayBase::ndtri) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp, scalar_exp_op, exponential,\sa ArrayBase::exp) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp2, scalar_exp2_op, exponential,\sa ArrayBase::exp2) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1, 
scalar_expm1_op, exponential of a value minus 1,\sa ArrayBase::expm1) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log, scalar_log_op, natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p, scalar_log1p_op, natural logarithm of 1 plus the value,\sa ArrayBase::log1p) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10, scalar_log10_op, base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log10) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log2, scalar_log2_op, base 2 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log2) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs, scalar_abs_op, absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2, scalar_abs2_op, + squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg, scalar_arg_op, complex argument,\sa ArrayBase::arg DOXCOMMA MatrixBase::cwiseArg) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(carg, scalar_carg_op, + complex argument, \sa ArrayBase::carg DOXCOMMA MatrixBase::cwiseCArg) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt, scalar_sqrt_op, square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cbrt, scalar_cbrt_op, cube root,\sa ArrayBase::cbrt DOXCOMMA MatrixBase::cwiseCbrt) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt, scalar_rsqrt_op, reciprocal square root,\sa ArrayBase::rsqrt) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square, scalar_square_op, + square(power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube, scalar_cube_op, cube(power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rint, scalar_rint_op, + nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round, scalar_round_op, + nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY( + floor, scalar_floor_op, nearest integer not greater than the given value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY( + ceil, scalar_ceil_op, nearest integer not less than the given value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(trunc, scalar_trunc_op, + nearest integer not greater in magnitude than the given value,\sa Eigen::trunc DOXCOMMA + ArrayBase::trunc) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY( + isnan, scalar_isnan_op, not -a - number test,\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isnan) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY( + isinf, scalar_isinf_op, infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite, scalar_isfinite_op, + finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite) +EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign, scalar_sign_op, sign(or 0),\sa ArrayBase::sign) + +template +using GlobalUnaryPowReturnType = std::enable_if_t< + !internal::is_arithmetic::Real>::value && + internal::is_arithmetic::Real>::value, + CwiseUnaryOp, const Derived> >; + +/** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent. + * + * \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given + * expression (\c Derived::Scalar). 
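+ *
+ * A minimal usage sketch (array values are illustrative only):
+ * \code
+ * Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(4, 1.0, 4.0);
+ * Eigen::ArrayXd b = Eigen::pow(a, 2.5);  // coefficient-wise a_i^2.5
+ * \endcode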
+ * + * \sa ArrayBase::pow() + * + * \relates ArrayBase + */ +#ifdef EIGEN_PARSED_BY_DOXYGEN +template +EIGEN_DEVICE_FUNC inline const GlobalUnaryPowReturnType pow(const Eigen::ArrayBase& x, + const ScalarExponent& exponent); +#else +template +EIGEN_DEVICE_FUNC inline const GlobalUnaryPowReturnType pow(const Eigen::ArrayBase& x, + const ScalarExponent& exponent) { + return GlobalUnaryPowReturnType( + x.derived(), internal::scalar_unary_pow_op(exponent)); +} +#endif + +/** \returns an expression of the coefficient-wise power of \a x to the given array of \a exponents. + * + * This function computes the coefficient-wise power. + * + * Example: \include Cwise_array_power_array.cpp + * Output: \verbinclude Cwise_array_power_array.out + * + * \sa ArrayBase::pow() + * + * \relates ArrayBase + */ +template +inline const Eigen::CwiseBinaryOp< + Eigen::internal::scalar_pow_op, const Derived, + const ExponentDerived> +pow(const Eigen::ArrayBase& x, const Eigen::ArrayBase& exponents) { + return Eigen::CwiseBinaryOp< + Eigen::internal::scalar_pow_op, const Derived, + const ExponentDerived>(x.derived(), exponents.derived()); +} + +/** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents. + * + * This function computes the coefficient-wise power between a scalar and an array of exponents. + * + * \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression + * (\c Derived::Scalar). + * + * Example: \include Cwise_scalar_power_array.cpp + * Output: \verbinclude Cwise_scalar_power_array.out + * + * \sa ArrayBase::pow() + * + * \relates ArrayBase + */ +#ifdef EIGEN_PARSED_BY_DOXYGEN +template +inline const CwiseBinaryOp, Constant, Derived> pow( + const Scalar& x, const Eigen::ArrayBase& x); +#else +template +EIGEN_DEVICE_FUNC inline const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE( + typename internal::promote_scalar_arg::type, + Derived, pow) pow(const Scalar& x, const Eigen::ArrayBase& exponents) { + typedef + typename internal::promote_scalar_arg::type + PromotedScalar; + return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedScalar, Derived, pow)( + typename internal::plain_constant_type::type( + exponents.derived().rows(), exponents.derived().cols(), internal::scalar_constant_op(x)), + exponents.derived()); +} +#endif + +/** \returns an expression of the coefficient-wise atan2(\a x, \a y). \a x and \a y must be of the same type. + * + * This function computes the coefficient-wise atan2(). + * + * \sa ArrayBase::atan2() + * + * \relates ArrayBase + */ +template +inline const std::enable_if_t< + std::is_same::value, + Eigen::CwiseBinaryOp, + const LhsDerived, const RhsDerived> > +atan2(const Eigen::ArrayBase& x, const Eigen::ArrayBase& exponents) { + return Eigen::CwiseBinaryOp< + Eigen::internal::scalar_atan2_op, const LhsDerived, + const RhsDerived>(x.derived(), exponents.derived()); +} + +namespace internal { +EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real, scalar_real_op) +EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag, scalar_imag_op) +EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2, scalar_abs2_op) +} // namespace internal +} // namespace Eigen + +// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, +// internal::isApprox...) 
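+
+// Usage sketch for the free functions declared in this header (array contents illustrative only):
+//
+//   Eigen::ArrayXf x = Eigen::ArrayXf::Random(8);
+//   Eigen::ArrayXf y = Eigen::sin(x) + Eigen::square(x);  // coefficient-wise sin(x) + x^2
+//   Eigen::ArrayXf z = Eigen::atan2(y, x);                // coefficient-wise atan2(y, x)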
+
+#endif // EIGEN_GLOBAL_FUNCTIONS_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/IO.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/IO.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a1b583d6ccda31bb6318883a0188030b24927fe
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/IO.h
@@ -0,0 +1,233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob
+// Copyright (C) 2008 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_IO_H
+#define EIGEN_IO_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+enum { DontAlignCols = 1 };
+enum { StreamPrecision = -1, FullPrecision = -2 };
+
+namespace internal {
+template <typename Derived>
+std::ostream& print_matrix(std::ostream& s, const Derived& _m, const IOFormat& fmt);
+}
+
+/** \class IOFormat
+ * \ingroup Core_Module
+ *
+ * \brief Stores a set of parameters controlling the way matrices are printed
+ *
+ * List of available parameters:
+ * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c
+ * FullPrecision. The default is the special value \c StreamPrecision which means to use the stream's own precision
+ * setting, as set for instance using \c cout.precision(3). The other special value \c FullPrecision means that the
+ * number of digits will be computed to match the full precision of each floating-point type.
+ * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c
+ * DontAlignCols, which disables the alignment of columns, resulting in faster code.
+ * - \b coeffSeparator string printed between two coefficients of the same row + * - \b rowSeparator string printed between two rows + * - \b rowPrefix string printed at the beginning of each row + * - \b rowSuffix string printed at the end of each row + * - \b matPrefix string printed at the beginning of the matrix + * - \b matSuffix string printed at the end of the matrix + * - \b fill character printed to fill the empty space in aligned columns + * + * Example: \include IOFormat.cpp + * Output: \verbinclude IOFormat.out + * + * \sa DenseBase::format(), class WithFormat + */ +struct IOFormat { + /** Default constructor, see class IOFormat for the meaning of the parameters */ + IOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _coeffSeparator = " ", + const std::string& _rowSeparator = "\n", const std::string& _rowPrefix = "", + const std::string& _rowSuffix = "", const std::string& _matPrefix = "", const std::string& _matSuffix = "", + const char _fill = ' ') + : matPrefix(_matPrefix), + matSuffix(_matSuffix), + rowPrefix(_rowPrefix), + rowSuffix(_rowSuffix), + rowSeparator(_rowSeparator), + rowSpacer(""), + coeffSeparator(_coeffSeparator), + fill(_fill), + precision(_precision), + flags(_flags) { + // TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline + // don't add rowSpacer if columns are not to be aligned + if ((flags & DontAlignCols)) return; + int i = int(matPrefix.length()) - 1; + while (i >= 0 && matPrefix[i] != '\n') { + rowSpacer += ' '; + i--; + } + } + std::string matPrefix, matSuffix; + std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer; + std::string coeffSeparator; + char fill; + int precision; + int flags; +}; + +/** \class WithFormat + * \ingroup Core_Module + * + * \brief Pseudo expression providing matrix output with given format + * + * \tparam ExpressionType the type of the object on which IO stream operations are performed + * + * This class represents an expression with stream operators controlled by a given IOFormat. + * It is the return type of DenseBase::format() + * and most of the time this is the only way it is used. + * + * See class IOFormat for some examples. + * + * \sa DenseBase::format(), class IOFormat + */ +template +class WithFormat { + public: + WithFormat(const ExpressionType& matrix, const IOFormat& format) : m_matrix(matrix), m_format(format) {} + + friend std::ostream& operator<<(std::ostream& s, const WithFormat& wf) { + return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format); + } + + protected: + typename ExpressionType::Nested m_matrix; + IOFormat m_format; +}; + +namespace internal { + +// NOTE: This helper is kept for backward compatibility with previous code specializing +// this internal::significant_decimals_impl structure. In the future we should directly +// call max_digits10(). 
+template +struct significant_decimals_impl { + static inline int run() { return NumTraits::max_digits10(); } +}; + +/** \internal + * print the matrix \a _m to the output stream \a s using the output format \a fmt */ +template +std::ostream& print_matrix(std::ostream& s, const Derived& _m, const IOFormat& fmt) { + using internal::is_same; + + if (_m.size() == 0) { + s << fmt.matPrefix << fmt.matSuffix; + return s; + } + + typename Derived::Nested m = _m; + typedef typename Derived::Scalar Scalar; + typedef std::conditional_t::value || is_same::value || + is_same::value || is_same::value, + int, + std::conditional_t >::value || + is_same >::value || + is_same >::value || + is_same >::value, + std::complex, const Scalar&> > + PrintType; + + Index width = 0; + + std::streamsize explicit_precision; + if (fmt.precision == StreamPrecision) { + explicit_precision = 0; + } else if (fmt.precision == FullPrecision) { + if (NumTraits::IsInteger) { + explicit_precision = 0; + } else { + explicit_precision = significant_decimals_impl::run(); + } + } else { + explicit_precision = fmt.precision; + } + + std::streamsize old_precision = 0; + if (explicit_precision) old_precision = s.precision(explicit_precision); + + bool align_cols = !(fmt.flags & DontAlignCols); + if (align_cols) { + // compute the largest width + for (Index j = 0; j < m.cols(); ++j) + for (Index i = 0; i < m.rows(); ++i) { + std::stringstream sstr; + sstr.copyfmt(s); + sstr << static_cast(m.coeff(i, j)); + width = std::max(width, Index(sstr.str().length())); + } + } + std::streamsize old_width = s.width(); + char old_fill_character = s.fill(); + s << fmt.matPrefix; + for (Index i = 0; i < m.rows(); ++i) { + if (i) s << fmt.rowSpacer; + s << fmt.rowPrefix; + if (width) { + s.fill(fmt.fill); + s.width(width); + } + s << static_cast(m.coeff(i, 0)); + for (Index j = 1; j < m.cols(); ++j) { + s << fmt.coeffSeparator; + if (width) { + s.fill(fmt.fill); + s.width(width); + } + s << static_cast(m.coeff(i, j)); + } + s << fmt.rowSuffix; + if (i < m.rows() - 1) s << fmt.rowSeparator; + } + s << fmt.matSuffix; + if (explicit_precision) s.precision(old_precision); + if (width) { + s.fill(old_fill_character); + s.width(old_width); + } + return s; +} + +} // end namespace internal + +/** \relates DenseBase + * + * Outputs the matrix, to the given stream. + * + * If you wish to print the matrix with a format different than the default, use DenseBase::format(). + * + * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers. + * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default + * parameters. + * + * \sa DenseBase::format() + */ +template +std::ostream& operator<<(std::ostream& s, const DenseBase& m) { + return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); +} + +template +std::ostream& operator<<(std::ostream& s, const DiagonalBase& m) { + return internal::print_matrix(s, m.derived(), EIGEN_DEFAULT_IO_FORMAT); +} + +} // end namespace Eigen + +#endif // EIGEN_IO_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/IndexedView.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/IndexedView.h new file mode 100644 index 0000000000000000000000000000000000000000..454e560e4b974418c60f2d7d5b5800985a49a89d --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/IndexedView.h @@ -0,0 +1,315 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INDEXED_VIEW_H +#define EIGEN_INDEXED_VIEW_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct traits> : traits { + enum { + RowsAtCompileTime = int(IndexedViewHelper::SizeAtCompileTime), + ColsAtCompileTime = int(IndexedViewHelper::SizeAtCompileTime), + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + + XprTypeIsRowMajor = (int(traits::Flags) & RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? 1 + : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0 + : XprTypeIsRowMajor, + + RowIncr = int(IndexedViewHelper::IncrAtCompileTime), + ColIncr = int(IndexedViewHelper::IncrAtCompileTime), + InnerIncr = IsRowMajor ? ColIncr : RowIncr, + OuterIncr = IsRowMajor ? RowIncr : ColIncr, + + HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), + XprInnerStride = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), + XprOuterstride = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + + InnerSize = XprTypeIsRowMajor ? ColsAtCompileTime : RowsAtCompileTime, + IsBlockAlike = InnerIncr == 1 && OuterIncr == 1, + IsInnerPannel = HasSameStorageOrderAsXprType && + is_same, std::conditional_t>::value, + + InnerStrideAtCompileTime = + InnerIncr < 0 || InnerIncr == DynamicIndex || XprInnerStride == Dynamic || InnerIncr == Undefined + ? Dynamic + : XprInnerStride * InnerIncr, + OuterStrideAtCompileTime = + OuterIncr < 0 || OuterIncr == DynamicIndex || XprOuterstride == Dynamic || OuterIncr == Undefined + ? Dynamic + : XprOuterstride * OuterIncr, + + ReturnAsScalar = is_single_range::value && is_single_range::value, + ReturnAsBlock = (!ReturnAsScalar) && IsBlockAlike, + ReturnAsIndexedView = (!ReturnAsScalar) && (!ReturnAsBlock), + + // FIXME we deal with compile-time strides if and only if we have DirectAccessBit flag, + // but this is too strict regarding negative strides... + DirectAccessMask = (int(InnerIncr) != Undefined && int(OuterIncr) != Undefined && InnerIncr >= 0 && OuterIncr >= 0) + ? DirectAccessBit + : 0, + FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? 
LinearAccessBit : 0,
+    Flags = (traits<XprType>::Flags & (HereditaryBits | DirectAccessMask)) | FlagsLvalueBit | FlagsRowMajorBit |
+            FlagsLinearAccessBit
+  };
+
+  typedef Block<XprType, RowsAtCompileTime, ColsAtCompileTime, IsInnerPannel> BlockType;
+};
+
+template <typename XprType, typename RowIndices, typename ColIndices, typename StorageKind, bool DirectAccess>
+class IndexedViewImpl;
+
+} // namespace internal
+
+/** \class IndexedView
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a non-sequential sub-matrix defined by arbitrary sequences of row and column indices
+ *
+ * \tparam XprType the type of the expression in which we are taking the intersections of sub-rows and sub-columns
+ * \tparam RowIndices the type of the object defining the sequence of row indices
+ * \tparam ColIndices the type of the object defining the sequence of column indices
+ *
+ * This class represents an expression of a sub-matrix (or sub-vector) defined as the intersection
+ * of sub-sets of rows and columns, that are themselves defined by generic sequences of row indices \f$
+ * \{r_0,r_1,..r_{m-1}\} \f$ and column indices \f$ \{c_0,c_1,..c_{n-1} \}\f$. Let \f$ A \f$ be the nested matrix, then
+ * the resulting matrix \f$ B \f$ has \c m rows and \c n columns, and its entries are given by: \f$ B(i,j) = A(r_i,c_j)
+ * \f$.
+ *
+ * The \c RowIndices and \c ColIndices types must be compatible with the following API:
+ * \code
+ * operator[](Index) const;
+ * Index size() const;
+ * \endcode
+ *
+ * Typical supported types thus include:
+ *  - std::vector<int>
+ *  - std::valarray<int>
+ *  - std::array<int>
+ *  - Eigen::ArrayXi
+ *  - decltype(ArrayXi::LinSpaced(...))
+ *  - Any view/expressions of the previous types
+ *  - Eigen::ArithmeticSequence
+ *  - Eigen::internal::AllRange (helper for Eigen::placeholders::all)
+ *  - Eigen::internal::SingleRange (helper for single index)
+ *  - etc.
+ *
+ * In typical usages of %Eigen, this class should never be used directly. It is the return type of
+ * DenseBase::operator()(const RowIndices&, const ColIndices&).
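+ *
+ * For instance (a sketch; matrix contents are illustrative only):
+ * \code
+ * Eigen::MatrixXd A(4, 4);
+ * std::vector<int> rows{3, 1};
+ * auto B = A(rows, Eigen::seq(0, 2));  // 2x3 view with B(i,j) == A(rows[i], j)
+ * \endcode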
+ * + * \sa class Block + */ +template +class IndexedView + : public internal::IndexedViewImpl::StorageKind, + (internal::traits>::Flags & + DirectAccessBit) != 0> { + public: + typedef typename internal::IndexedViewImpl< + XprType, RowIndices, ColIndices, typename internal::traits::StorageKind, + (internal::traits>::Flags & DirectAccessBit) != 0> + Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(IndexedView) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedView) + + template + IndexedView(XprType& xpr, const T0& rowIndices, const T1& colIndices) : Base(xpr, rowIndices, colIndices) {} +}; + +namespace internal { + +// Generic API dispatcher +template +class IndexedViewImpl : public internal::generic_xpr_base>::type { + public: + typedef typename internal::generic_xpr_base>::type Base; + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef internal::remove_all_t NestedExpression; + typedef typename XprType::Scalar Scalar; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedViewImpl) + + template + IndexedViewImpl(XprType& xpr, const T0& rowIndices, const T1& colIndices) + : m_xpr(xpr), m_rowIndices(rowIndices), m_colIndices(colIndices) {} + + /** \returns number of rows */ + Index rows() const { return IndexedViewHelper::size(m_rowIndices); } + + /** \returns number of columns */ + Index cols() const { return IndexedViewHelper::size(m_colIndices); } + + /** \returns the nested expression */ + const internal::remove_all_t& nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + std::remove_reference_t& nestedExpression() { return m_xpr; } + + /** \returns a const reference to the object storing/generating the row indices */ + const RowIndices& rowIndices() const { return m_rowIndices; } + + /** \returns a const reference to the object storing/generating the column indices */ + const ColIndices& colIndices() const { return m_colIndices; } + + constexpr Scalar& coeffRef(Index rowId, Index colId) { + return nestedExpression().coeffRef(m_rowIndices[rowId], m_colIndices[colId]); + } + + constexpr const Scalar& coeffRef(Index rowId, Index colId) const { + return nestedExpression().coeffRef(m_rowIndices[rowId], m_colIndices[colId]); + } + + protected: + MatrixTypeNested m_xpr; + RowIndices m_rowIndices; + ColIndices m_colIndices; +}; + +template +class IndexedViewImpl + : public IndexedViewImpl { + public: + using Base = internal::IndexedViewImpl::StorageKind, false>; + using Derived = IndexedView; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedViewImpl) + + template + IndexedViewImpl(XprType& xpr, const T0& rowIndices, const T1& colIndices) : Base(xpr, rowIndices, colIndices) {} + + Index rowIncrement() const { + if (traits::RowIncr != DynamicIndex && traits::RowIncr != Undefined) { + return traits::RowIncr; + } + return IndexedViewHelper::incr(this->rowIndices()); + } + Index colIncrement() const { + if (traits::ColIncr != DynamicIndex && traits::ColIncr != Undefined) { + return traits::ColIncr; + } + return IndexedViewHelper::incr(this->colIndices()); + } + + Index innerIncrement() const { return traits::IsRowMajor ? colIncrement() : rowIncrement(); } + + Index outerIncrement() const { return traits::IsRowMajor ? 
rowIncrement() : colIncrement(); } + + std::decay_t* data() { + Index row_offset = this->rowIndices()[0] * this->nestedExpression().rowStride(); + Index col_offset = this->colIndices()[0] * this->nestedExpression().colStride(); + return this->nestedExpression().data() + row_offset + col_offset; + } + + const std::decay_t* data() const { + Index row_offset = this->rowIndices()[0] * this->nestedExpression().rowStride(); + Index col_offset = this->colIndices()[0] * this->nestedExpression().colStride(); + return this->nestedExpression().data() + row_offset + col_offset; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { + if (traits::InnerStrideAtCompileTime != Dynamic) { + return traits::InnerStrideAtCompileTime; + } + return innerIncrement() * this->nestedExpression().innerStride(); + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { + if (traits::OuterStrideAtCompileTime != Dynamic) { + return traits::OuterStrideAtCompileTime; + } + return outerIncrement() * this->nestedExpression().outerStride(); + } +}; + +template +struct unary_evaluator, IndexBased> + : evaluator_base> { + typedef IndexedView XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost /* TODO + cost of row/col index */, + + FlagsLinearAccessBit = + (traits::RowsAtCompileTime == 1 || traits::ColsAtCompileTime == 1) ? LinearAccessBit : 0, + + FlagsRowMajorBit = traits::FlagsRowMajorBit, + + Flags = (evaluator::Flags & (HereditaryBits & ~RowMajorBit /*| LinearAccessBit | DirectAccessBit*/)) | + FlagsLinearAccessBit | FlagsRowMajorBit, + + Alignment = 0 + }; + + EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { + eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() && + m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols()); + return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { + eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() && + m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols()); + return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; + Index col = XprType::RowsAtCompileTime == 1 ? index : 0; + eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() && + m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols()); + return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const { + Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; + Index col = XprType::RowsAtCompileTime == 1 ? 
index : 0; + eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() && + m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols()); + return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index index) const { + Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; + Index col = XprType::RowsAtCompileTime == 1 ? index : 0; + eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() && + m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols()); + return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); + } + + protected: + evaluator m_argImpl; + const XprType& m_xpr; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_INDEXED_VIEW_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/InnerProduct.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/InnerProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..1e16942c238f914409f84aa6e7de7f124f136115 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/InnerProduct.h @@ -0,0 +1,250 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2024 Charlie Schlosser +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INNER_PRODUCT_EVAL_H +#define EIGEN_INNER_PRODUCT_EVAL_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +// recursively searches for the largest simd type that does not exceed Size, or the smallest if no such type exists +template ::type, + bool Stop = + (unpacket_traits::size <= Size) || is_same::half>::value> +struct find_inner_product_packet_helper; + +template +struct find_inner_product_packet_helper { + using type = typename find_inner_product_packet_helper::half>::type; +}; + +template +struct find_inner_product_packet_helper { + using type = Packet; +}; + +template +struct find_inner_product_packet : find_inner_product_packet_helper {}; + +template +struct find_inner_product_packet { + using type = typename packet_traits::type; +}; + +template +struct inner_product_assert { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Lhs) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Rhs) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Lhs, Rhs) +#ifndef EIGEN_NO_DEBUG + static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, const Rhs& rhs) { + eigen_assert((lhs.size() == rhs.size()) && "Inner product: lhs and rhs vectors must have same size"); + } +#else + static EIGEN_DEVICE_FUNC void run(const Lhs&, const Rhs&) {} +#endif +}; + +template +struct inner_product_evaluator { + static constexpr int LhsFlags = evaluator::Flags, RhsFlags = evaluator::Flags, + SizeAtCompileTime = min_size_prefer_fixed(Lhs::SizeAtCompileTime, Rhs::SizeAtCompileTime), + LhsAlignment = evaluator::Alignment, RhsAlignment = evaluator::Alignment; + + using Scalar = typename Func::result_type; + using Packet = typename find_inner_product_packet::type; + + static constexpr bool Vectorize = + bool(LhsFlags & RhsFlags & PacketAccessBit) && Func::PacketAccess && + ((SizeAtCompileTime == Dynamic) || (unpacket_traits::size <= SizeAtCompileTime)); + + 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit inner_product_evaluator(const Lhs& lhs, const Rhs& rhs, + Func func = Func()) + : m_func(func), m_lhs(lhs), m_rhs(rhs), m_size(lhs.size()) { + inner_product_assert::run(lhs, rhs); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_size.value(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index index) const { + return m_func.coeff(m_lhs.coeff(index), m_rhs.coeff(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& value, Index index) const { + return m_func.coeff(value, m_lhs.coeff(index), m_rhs.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const { + return m_func.packet(m_lhs.template packet(index), + m_rhs.template packet(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(const PacketType& value, Index index) const { + return m_func.packet(value, m_lhs.template packet(index), + m_rhs.template packet(index)); + } + + const Func m_func; + const evaluator m_lhs; + const evaluator m_rhs; + const variable_if_dynamic m_size; +}; + +template +struct inner_product_impl; + +// scalar loop +template +struct inner_product_impl { + using Scalar = typename Evaluator::Scalar; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval) { + const Index size = eval.size(); + if (size == 0) return Scalar(0); + + Scalar result = eval.coeff(0); + for (Index k = 1; k < size; k++) { + result = eval.coeff(result, k); + } + + return result; + } +}; + +// vector loop +template +struct inner_product_impl { + using UnsignedIndex = std::make_unsigned_t; + using Scalar = typename Evaluator::Scalar; + using Packet = typename Evaluator::Packet; + static constexpr int PacketSize = unpacket_traits::size; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval) { + const UnsignedIndex size = static_cast(eval.size()); + if (size < PacketSize) return inner_product_impl::run(eval); + + const UnsignedIndex packetEnd = numext::round_down(size, PacketSize); + const UnsignedIndex quadEnd = numext::round_down(size, 4 * PacketSize); + const UnsignedIndex numPackets = size / PacketSize; + const UnsignedIndex numRemPackets = (packetEnd - quadEnd) / PacketSize; + + Packet presult0, presult1, presult2, presult3; + + presult0 = eval.template packet(0 * PacketSize); + if (numPackets >= 2) presult1 = eval.template packet(1 * PacketSize); + if (numPackets >= 3) presult2 = eval.template packet(2 * PacketSize); + if (numPackets >= 4) { + presult3 = eval.template packet(3 * PacketSize); + + for (UnsignedIndex k = 4 * PacketSize; k < quadEnd; k += 4 * PacketSize) { + presult0 = eval.packet(presult0, k + 0 * PacketSize); + presult1 = eval.packet(presult1, k + 1 * PacketSize); + presult2 = eval.packet(presult2, k + 2 * PacketSize); + presult3 = eval.packet(presult3, k + 3 * PacketSize); + } + + if (numRemPackets >= 1) presult0 = eval.packet(presult0, quadEnd + 0 * PacketSize); + if (numRemPackets >= 2) presult1 = eval.packet(presult1, quadEnd + 1 * PacketSize); + if (numRemPackets == 3) presult2 = eval.packet(presult2, quadEnd + 2 * PacketSize); + + presult2 = padd(presult2, presult3); + } + + if (numPackets >= 3) presult1 = padd(presult1, presult2); + if (numPackets >= 2) presult0 = padd(presult0, presult1); + + Scalar result = predux(presult0); + for (UnsignedIndex k = packetEnd; k < size; k++) { + result = eval.coeff(result, k); + } + + return result; + } +}; + +template +struct conditional_conj; 
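+
+// Note on the vectorized path above: it keeps four independent packet accumulators and folds them
+// together only at the end, which hides FMA/add latency and gives the reduction a tree shape.
+// A scalar model of the same idea (illustrative only):
+//
+//   float acc[4] = {0.f, 0.f, 0.f, 0.f};
+//   for (int k = 0; k + 4 <= n; k += 4)
+//     for (int j = 0; j < 4; ++j) acc[j] += x[k + j] * y[k + j];
+//   float dot = (acc[0] + acc[1]) + (acc[2] + acc[3]);
+//   for (int k = n - n % 4; k < n; ++k) dot += x[k] * y[k];  // scalar tail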
+ +template +struct conditional_conj { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& a) { return numext::conj(a); } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& a) { + return pconj(a); + } +}; + +template +struct conditional_conj { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& a) { return a; } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& a) { + return a; + } +}; + +template +struct scalar_inner_product_op { + using result_type = typename ScalarBinaryOpTraits::ReturnType; + using conj_helper = conditional_conj; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type coeff(const LhsScalar& a, const RhsScalar& b) const { + return (conj_helper::coeff(a) * b); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type coeff(const result_type& accum, const LhsScalar& a, + const RhsScalar& b) const { + return (conj_helper::coeff(a) * b) + accum; + } + static constexpr bool PacketAccess = false; +}; + +template +struct scalar_inner_product_op { + using result_type = Scalar; + using conj_helper = conditional_conj; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& a, const Scalar& b) const { + return pmul(conj_helper::coeff(a), b); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& accum, const Scalar& a, const Scalar& b) const { + return pmadd(conj_helper::coeff(a), b, accum); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& a, const Packet& b) const { + return pmul(conj_helper::packet(a), b); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& accum, const Packet& a, const Packet& b) const { + return pmadd(conj_helper::packet(a), b, accum); + } + static constexpr bool PacketAccess = packet_traits::HasMul && packet_traits::HasAdd; +}; + +template +struct default_inner_product_impl { + using LhsScalar = typename traits::Scalar; + using RhsScalar = typename traits::Scalar; + using Op = scalar_inner_product_op; + using Evaluator = inner_product_evaluator; + using result_type = typename Evaluator::Scalar; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type run(const MatrixBase& a, const MatrixBase& b) { + Evaluator eval(a.derived(), b.derived(), Op()); + return inner_product_impl::run(eval); + } +}; + +template +struct dot_impl : default_inner_product_impl {}; + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_INNER_PRODUCT_EVAL_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/InternalHeaderCheck.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/InternalHeaderCheck.h new file mode 100644 index 0000000000000000000000000000000000000000..1cea572dcf9875c3abf175513397c2a0be053c90 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/InternalHeaderCheck.h @@ -0,0 +1,3 @@ +#ifndef EIGEN_CORE_MODULE_H +#error "Please include Eigen/Core instead of including headers inside the src directory directly." +#endif diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Inverse.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Inverse.h new file mode 100644 index 0000000000000000000000000000000000000000..013ad0a942664cdf4e61c969d28454f2e320b1a7 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Inverse.h @@ -0,0 +1,108 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+//
+// Copyright (C) 2014-2019 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INVERSE_H
+#define EIGEN_INVERSE_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+template <typename XprType, typename StorageKind>
+class InverseImpl;
+
+namespace internal {
+
+template <typename XprType>
+struct traits<Inverse<XprType> > : traits<typename XprType::PlainObject> {
+  typedef typename XprType::PlainObject PlainObject;
+  typedef traits<PlainObject> BaseTraits;
+  enum { Flags = BaseTraits::Flags & RowMajorBit };
+};
+
+} // end namespace internal
+
+/** \class Inverse
+ *
+ * \brief Expression of the inverse of another expression
+ *
+ * \tparam XprType the type of the expression we are taking the inverse of
+ *
+ * This class represents an abstract expression of A.inverse()
+ * and most of the time this is the only way it is used.
+ *
+ */
+template <typename XprType>
+class Inverse : public InverseImpl<XprType, typename internal::traits<XprType>::StorageKind> {
+ public:
+  typedef typename XprType::StorageIndex StorageIndex;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename internal::ref_selector<XprType>::type XprTypeNested;
+  typedef internal::remove_all_t<XprTypeNested> XprTypeNestedCleaned;
+  typedef typename internal::ref_selector<Inverse>::type Nested;
+  typedef internal::remove_all_t<XprType> NestedExpression;
+
+  explicit EIGEN_DEVICE_FUNC Inverse(const XprType& xpr) : m_xpr(xpr) {}
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.cols(); }
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.rows(); }
+
+  EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }
+
+ protected:
+  XprTypeNested m_xpr;
+};
+
+// Generic API dispatcher
+template <typename XprType, typename StorageKind>
+class InverseImpl : public internal::generic_xpr_base<Inverse<XprType> >::type {
+ public:
+  typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base;
+  typedef typename XprType::Scalar Scalar;
+
+ private:
+  Scalar coeff(Index row, Index col) const;
+  Scalar coeff(Index i) const;
+};
+
+namespace internal {
+
+/** \internal
+ * \brief Default evaluator for Inverse expression.
+ *
+ * This default evaluator for Inverse expression simply evaluates the inverse into a temporary
+ * by a call to internal::call_assignment_no_alias.
+ * Therefore, inverse implementers only have to specialize Assignment<Dst, Inverse<...>, ...> for
+ * their own nested expression.
+ *
+ * \sa class Inverse
+ */
+template <typename ArgType>
+struct unary_evaluator<Inverse<ArgType> > : public evaluator<typename Inverse<ArgType>::PlainObject> {
+  typedef Inverse<ArgType> InverseType;
+  typedef typename InverseType::PlainObject PlainObject;
+  typedef evaluator<PlainObject> Base;
+
+  enum { Flags = Base::Flags | EvalBeforeNestingBit };
+
+  EIGEN_DEVICE_FUNC unary_evaluator(const InverseType& inv_xpr) : m_result(inv_xpr.rows(), inv_xpr.cols()) {
+    internal::construct_at<Base>(this, m_result);
+    internal::call_assignment_no_alias(m_result, inv_xpr);
+  }
+
+ protected:
+  PlainObject m_result;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INVERSE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Map.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Map.h
new file mode 100644
index 0000000000000000000000000000000000000000..df7b7ca77819879324b2688cfb18d8321b5679a0
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Map.h
@@ -0,0 +1,153 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MAP_H +#define EIGEN_MAP_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits { + typedef traits TraitsBase; + enum { + PlainObjectTypeInnerSize = ((traits::Flags & RowMajorBit) == RowMajorBit) + ? PlainObjectType::ColsAtCompileTime + : PlainObjectType::RowsAtCompileTime, + + InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 + ? int(PlainObjectType::InnerStrideAtCompileTime) + : int(StrideType::InnerStrideAtCompileTime), + OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 + ? (InnerStrideAtCompileTime == Dynamic || PlainObjectTypeInnerSize == Dynamic + ? Dynamic + : int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize)) + : int(StrideType::OuterStrideAtCompileTime), + Alignment = int(MapOptions) & int(AlignedMask), + Flags0 = TraitsBase::Flags & (~NestByRefBit), + Flags = is_lvalue::value ? int(Flags0) : (int(Flags0) & ~LvalueBit) + }; + + private: + enum { Options }; // Expressions don't have Options +}; +} // namespace internal + +/** \class Map + * \ingroup Core_Module + * + * \brief A matrix or vector expression mapping an existing array of data. + * + * \tparam PlainObjectType the equivalent matrix type of the mapped data + * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32, + * \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned. \tparam StrideType optionally specifies + * strides. By default, Map assumes the memory layout of an ordinary, contiguous array. This can be overridden by + * specifying strides. The type passed here must be a specialization of the Stride template, see examples below. + * + * This class represents a matrix or vector expression mapping an existing array of data. + * It can be used to let Eigen interface without any overhead with non-Eigen data structures, + * such as plain C arrays or structures from other libraries. By default, it assumes that the + * data is laid out contiguously in memory. You can however override this by explicitly specifying + * inner and outer strides. + * + * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: + * \include Map_simple.cpp + * Output: \verbinclude Map_simple.out + * + * If you need to map non-contiguous arrays, you can do so by specifying strides: + * + * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer + * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time + * fixed value. + * \include Map_inner_stride.cpp + * Output: \verbinclude Map_inner_stride.out + * + * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping + * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns. + * Here, we're specifying the outer stride as a runtime parameter. 
Note that here \c OuterStride<> is + * a short version of \c OuterStride because the default template parameter of OuterStride + * is \c Dynamic + * \include Map_outer_stride.cpp + * Output: \verbinclude Map_outer_stride.out + * + * For more details and for an example of specifying both an inner and an outer stride, see class Stride. + * + * \b Tip: to change the array of data mapped by a Map object, you can use the C++ + * placement new syntax: + * + * Example: \include Map_placement_new.cpp + * Output: \verbinclude Map_placement_new.out + * + * This class is the return type of PlainObjectBase::Map() but can also be used directly. + * + * \sa PlainObjectBase::Map(), \ref TopicStorageOrders + */ +template +class Map : public MapBase > { + public: + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Map) + + typedef typename Base::PointerType PointerType; + typedef PointerType PointerArgType; + EIGEN_DEVICE_FUNC inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { + return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { + return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() + : internal::traits::OuterStrideAtCompileTime != Dynamic + ? Index(internal::traits::OuterStrideAtCompileTime) + : IsVectorAtCompileTime ? (this->size() * innerStride()) + : int(Flags) & RowMajorBit ? (this->cols() * innerStride()) + : (this->rows() * innerStride()); + } + + /** Constructor in the fixed-size case. + * + * \param dataPtr pointer to the array to map + * \param stride optional Stride object, passing the strides. + */ + EIGEN_DEVICE_FUNC explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr)), m_stride(stride) {} + + /** Constructor in the dynamic-size vector case. + * + * \param dataPtr pointer to the array to map + * \param size the size of the vector expression + * \param stride optional Stride object, passing the strides. + */ + EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr), size), m_stride(stride) {} + + /** Constructor in the dynamic-size matrix case. + * + * \param dataPtr pointer to the array to map + * \param rows the number of rows of the matrix expression + * \param cols the number of columns of the matrix expression + * \param stride optional Stride object, passing the strides. + */ + EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) + + protected: + StrideType m_stride; +}; + +} // end namespace Eigen + +#endif // EIGEN_MAP_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MapBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MapBase.h new file mode 100644 index 0000000000000000000000000000000000000000..1e83fdf70fe3c0e67a392550760425a78c5fdec5 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MapBase.h @@ -0,0 +1,283 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MAPBASE_H +#define EIGEN_MAPBASE_H + +#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ + EIGEN_STATIC_ASSERT((int(internal::evaluator::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ + YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \ingroup Core_Module + * + * \brief Base class for dense Map and Block expression with direct access + * + * This base class provides the const low-level accessors (e.g. coeff, coeffRef) of dense + * Map and Block objects with direct access. + * Typical users do not have to directly deal with this class. + * + * This class can be extended by through the macro plugin \c EIGEN_MAPBASE_PLUGIN. + * See \link TopicCustomizing_Plugins customizing Eigen \endlink for details. + * + * The \c Derived class has to provide the following two methods describing the memory layout: + * \code Index innerStride() const; \endcode + * \code Index outerStride() const; \endcode + * + * \sa class Map, class Block + */ +template +class MapBase : public internal::dense_xpr_base::type { + public: + typedef typename internal::dense_xpr_base::type Base; + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + InnerStrideAtCompileTime = internal::traits::InnerStrideAtCompileTime, + SizeAtCompileTime = Base::SizeAtCompileTime + }; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef std::conditional_t::value), Scalar*, const Scalar*> PointerType; + + using Base::derived; + // using Base::RowsAtCompileTime; + // using Base::ColsAtCompileTime; + // using Base::SizeAtCompileTime; + using Base::Flags; + using Base::IsRowMajor; + using Base::IsVectorAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxSizeAtCompileTime; + + using Base::coeff; + using Base::coeffRef; + using Base::cols; + using Base::eval; + using Base::lazyAssign; + using Base::rows; + using Base::size; + + using Base::colStride; + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + + // bug 217 - compile error on ICC 11.1 + using Base::operator=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + + /** \copydoc DenseBase::rows() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_rows.value(); } + /** \copydoc DenseBase::cols() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_cols.value(); } + + /** Returns a pointer to the first coefficient of the matrix or vector. + * + * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride(). 
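+ *
+ * For example, for a column-major mapped matrix \c m this amounts to (a sketch):
+ * \code
+ * const Scalar* p = m.data();
+ * Scalar x = p[i * m.innerStride() + j * m.outerStride()];  // same value as m.coeff(i, j)
+ * \endcode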
+   *
+   * \sa innerStride(), outerStride()
+   */
+  EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_data; }
+
+  /** \copydoc PlainObjectBase::coeff(Index,Index) const */
+  EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index rowId, Index colId) const {
+    return m_data[colId * colStride() + rowId * rowStride()];
+  }
+
+  /** \copydoc PlainObjectBase::coeff(Index) const */
+  EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index index) const {
+    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+    return m_data[index * innerStride()];
+  }
+
+  /** \copydoc PlainObjectBase::coeffRef(Index,Index) const */
+  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
+    return this->m_data[colId * colStride() + rowId * rowStride()];
+  }
+
+  /** \copydoc PlainObjectBase::coeffRef(Index) const */
+  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
+    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+    return this->m_data[index * innerStride()];
+  }
+
+  /** \internal */
+  template <int LoadMode>
+  inline PacketScalar packet(Index rowId, Index colId) const {
+    return internal::ploadt<PacketScalar, LoadMode>(m_data + (colId * colStride() + rowId * rowStride()));
+  }
+
+  /** \internal */
+  template <int LoadMode>
+  inline PacketScalar packet(Index index) const {
+    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+    return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
+  }
+
+  /** \internal Constructor for fixed size matrices or vectors */
+  EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr)
+      : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) {
+    EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+    checkSanity<Derived>();
+  }
+
+  /** \internal Constructor for dynamically sized vectors */
+  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize)
+      : m_data(dataPtr),
+        m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
+        m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) {
+    EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+    eigen_assert(vecSize >= 0);
+    eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
+    checkSanity<Derived>();
+  }
+
+  /** \internal Constructor for dynamically sized matrices */
+  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols)
+      : m_data(dataPtr), m_rows(rows), m_cols(cols) {
+    eigen_assert((dataPtr == 0) || (rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) &&
+                                    cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
+    checkSanity<Derived>();
+  }
+
+#ifdef EIGEN_MAPBASE_PLUGIN
+#include EIGEN_MAPBASE_PLUGIN
+#endif
+
+ protected:
+  EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
+  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
+
+  template <typename T>
+  EIGEN_DEVICE_FUNC void checkSanity(std::enable_if_t<(internal::traits<T>::Alignment > 0), void*> = 0) const {
+// Temporary macro to allow scalars to not be properly aligned.  This is while we sort out failures
+// in TensorFlow Lite that are currently relying on this UB.
+#ifndef EIGEN_ALLOW_UNALIGNED_SCALARS
+    // Pointer must be aligned to the Scalar type, otherwise we get UB.
+    eigen_assert((std::uintptr_t(m_data) % alignof(Scalar) == 0) && "data is not scalar-aligned");
+#endif
+#if EIGEN_MAX_ALIGN_BYTES > 0
+    // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible
+    // value:
+    const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime);
+    EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride);
+    eigen_assert((((std::uintptr_t(m_data) % internal::traits<Derived>::Alignment) == 0) ||
+                  (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment) &&
+                 "data is not aligned");
+#endif
+  }
+
+  template <typename T>
+  EIGEN_DEVICE_FUNC void checkSanity(std::enable_if_t<internal::traits<T>::Alignment == 0, void*> = 0) const {
+#ifndef EIGEN_ALLOW_UNALIGNED_SCALARS
+    // Pointer must be aligned to the Scalar type, otherwise we get UB.
+    eigen_assert((std::uintptr_t(m_data) % alignof(Scalar) == 0) && "data is not scalar-aligned");
+#endif
+  }
+
+  PointerType m_data;
+  const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+  const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
+};
+
+/** \ingroup Core_Module
+ *
+ * \brief Base class for non-const dense Map and Block expression with direct access
+ *
+ * This base class provides the non-const low-level accessors (e.g. coeff and coeffRef) of
+ * dense Map and Block objects with direct access.
+ * It inherits MapBase<Derived, ReadOnlyAccessors> which defines the const variant for reading specific entries.
+ *
+ * \sa class Map, class Block
+ */
+template <typename Derived>
+class MapBase<Derived, WriteAccessors> : public MapBase<Derived, ReadOnlyAccessors> {
+  typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
+
+ public:
+  typedef MapBase<Derived, ReadOnlyAccessors> Base;
+
+  typedef typename Base::Scalar Scalar;
+  typedef typename Base::PacketScalar PacketScalar;
+  typedef typename Base::StorageIndex StorageIndex;
+  typedef typename Base::PointerType PointerType;
+
+  using Base::coeff;
+  using Base::coeffRef;
+  using Base::cols;
+  using Base::derived;
+  using Base::rows;
+  using Base::size;
+
+  using Base::colStride;
+  using Base::innerStride;
+  using Base::outerStride;
+  using Base::rowStride;
+
+  typedef std::conditional_t<internal::is_lvalue<Derived>::value, Scalar, const Scalar> ScalarWithConstIfNotLvalue;
+
+  EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return this->m_data; }
+  EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() {
+    return this->m_data;
+  }  // no const-cast here so non-const-correct code will give a compile error
+
+  EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) {
+    return this->m_data[col * colStride() + row * rowStride()];
+  }
+
+  EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index index) {
+    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+    return this->m_data[index * innerStride()];
+  }
+
+  template <int StoreMode>
+  inline void writePacket(Index row, Index col, const PacketScalar& val) {
+    internal::pstoret<Scalar, PacketScalar, StoreMode>(this->m_data + (col * colStride() + row * rowStride()), val);
+  }
+
+  template <int StoreMode>
+  inline void writePacket(Index index, const PacketScalar& val) {
+    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+    internal::pstoret<Scalar, PacketScalar, StoreMode>(this->m_data + index * innerStride(), val);
+  }
+
+  EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
+  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
+  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, rows, cols) {}
+
+  EIGEN_DEVICE_FUNC Derived& operator=(const MapBase& other) {
+    ReadOnlyMapBase::Base::operator=(other);
+    return derived();
+  }
+
+  // In theory we could simply refer to Base::Base::operator=, but MSVC does not like Base::Base,
+  // see bugs 821 and 920.
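+  // Going through the ReadOnlyMapBase typedef introduced above names the same base class
+  // without ever spelling Base::Base.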
+ using ReadOnlyMapBase::Base::operator=; + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase) +}; + +#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS + +} // end namespace Eigen + +#endif // EIGEN_MAPBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MathFunctions.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..57fb3bde3041fcbe5bfd6cac2ad0a27743e18822 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MathFunctions.h @@ -0,0 +1,2006 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATHFUNCTIONS_H +#define EIGEN_MATHFUNCTIONS_H + +// TODO this should better be moved to NumTraits +// Source: WolframAlpha +#define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L +#define EIGEN_LOG2E 1.442695040888963407359924681001892137426645954152985934135449406931109219L +#define EIGEN_LN2 0.693147180559945309417232121458176568075500134360255254120680009493393621L + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +/** \internal \class global_math_functions_filtering_base + * + * What it does: + * Defines a typedef 'type' as follows: + * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then + * global_math_functions_filtering_base::type is a typedef for it. + * - otherwise, global_math_functions_filtering_base::type is a typedef for T. + * + * How it's used: + * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions. + * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know + * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase. + * So we must make sure to use sin_impl > and not sin_impl, otherwise our partial + * specialization won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells + * it. + * + * How it's implemented: + * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you + * replace the typename dummy by an integer template parameter, it doesn't work anymore! 
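+ *
+ * Illustrative example (hypothetical instantiations, not part of the implementation): for a
+ * plain scalar, global_math_functions_filtering_base<double>::type is simply double; for an
+ * array expression type E that provides the Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl
+ * typedef (as ArrayBase does), it resolves to that base class instead, so the ArrayBase partial
+ * specializations of the *_impl functors are selected.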
+ */ + +template +struct global_math_functions_filtering_base { + typedef T type; +}; + +template +struct always_void { + typedef void type; +}; + +template +struct global_math_functions_filtering_base< + T, typename always_void::type> { + typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; +}; + +#define EIGEN_MATHFUNC_IMPL(func, scalar) \ + Eigen::internal::func##_impl::type> +#define EIGEN_MATHFUNC_RETVAL(func, scalar) \ + typename Eigen::internal::func##_retval< \ + typename Eigen::internal::global_math_functions_filtering_base::type>::type + +/**************************************************************************** + * Implementation of real * + ****************************************************************************/ + +template ::IsComplex> +struct real_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x; } +}; + +template +struct real_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + using std::real; + return real(x); + } +}; + +template +struct real_impl : real_default_impl {}; + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template +struct real_impl> { + typedef T RealScalar; + EIGEN_DEVICE_FUNC static inline T run(const std::complex& x) { return x.real(); } +}; +#endif + +template +struct real_retval { + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** + * Implementation of imag * + ****************************************************************************/ + +template ::IsComplex> +struct imag_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar&) { return RealScalar(0); } +}; + +template +struct imag_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + using std::imag; + return imag(x); + } +}; + +template +struct imag_impl : imag_default_impl {}; + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template +struct imag_impl> { + typedef T RealScalar; + EIGEN_DEVICE_FUNC static inline T run(const std::complex& x) { return x.imag(); } +}; +#endif + +template +struct imag_retval { + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** + * Implementation of real_ref * + ****************************************************************************/ + +template +struct real_ref_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[0]; } + EIGEN_DEVICE_FUNC static inline const RealScalar& run(const Scalar& x) { + return reinterpret_cast(&x)[0]; + } +}; + +template +struct real_ref_retval { + typedef typename NumTraits::Real& type; +}; + +/**************************************************************************** + * Implementation of imag_ref * + ****************************************************************************/ + +template +struct imag_ref_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[1]; } + EIGEN_DEVICE_FUNC static inline const RealScalar& run(const Scalar& x) { + return reinterpret_cast(&x)[1]; + } +}; + +template +struct imag_ref_default_impl { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline 
Scalar run(Scalar&) { return Scalar(0); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline const Scalar run(const Scalar&) { return Scalar(0); } +}; + +template +struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; + +template +struct imag_ref_retval { + typedef typename NumTraits::Real& type; +}; + +/**************************************************************************** + * Implementation of conj * + ****************************************************************************/ + +template ::IsComplex> +struct conj_default_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { return x; } +}; + +template +struct conj_default_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { + using std::conj; + return conj(x); + } +}; + +template ::IsComplex> +struct conj_impl : conj_default_impl {}; + +template +struct conj_retval { + typedef Scalar type; +}; + +/**************************************************************************** + * Implementation of abs2 * + ****************************************************************************/ + +template +struct abs2_impl_default { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x * x; } +}; + +template +struct abs2_impl_default // IsComplex +{ + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x.real() * x.real() + x.imag() * x.imag(); } +}; + +template +struct abs2_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + return abs2_impl_default::IsComplex>::run(x); + } +}; + +template +struct abs2_retval { + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** + * Implementation of sqrt/rsqrt * + ****************************************************************************/ + +template +struct sqrt_impl { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE Scalar run(const Scalar& x) { + EIGEN_USING_STD(sqrt); + return sqrt(x); + } +}; + +// Complex sqrt defined in MathFunctionsImpl.h. +template +EIGEN_DEVICE_FUNC std::complex complex_sqrt(const std::complex& a_x); + +// Custom implementation is faster than `std::sqrt`, works on +// GPU, and correctly handles special cases (unlike MSVC). +template +struct sqrt_impl> { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE std::complex run(const std::complex& x) { + return complex_sqrt(x); + } +}; + +template +struct sqrt_retval { + typedef Scalar type; +}; + +// Default implementation relies on numext::sqrt, at bottom of file. +template +struct rsqrt_impl; + +// Complex rsqrt defined in MathFunctionsImpl.h. 
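+// The rsqrt_impl specialization just below routes std::complex through complex_rsqrt instead of
+// the generic T(1) / numext::sqrt(x) fallback defined at the bottom of this file.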
+template +EIGEN_DEVICE_FUNC std::complex complex_rsqrt(const std::complex& a_x); + +template +struct rsqrt_impl> { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE std::complex run(const std::complex& x) { + return complex_rsqrt(x); + } +}; + +template +struct rsqrt_retval { + typedef Scalar type; +}; + +/**************************************************************************** + * Implementation of norm1 * + ****************************************************************************/ + +template +struct norm1_default_impl; + +template +struct norm1_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + EIGEN_USING_STD(abs); + return abs(x.real()) + abs(x.imag()); + } +}; + +template +struct norm1_default_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { + EIGEN_USING_STD(abs); + return abs(x); + } +}; + +template +struct norm1_impl : norm1_default_impl::IsComplex> {}; + +template +struct norm1_retval { + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** + * Implementation of hypot * + ****************************************************************************/ + +template +struct hypot_impl; + +template +struct hypot_retval { + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** + * Implementation of cast * + ****************************************************************************/ + +template +struct cast_impl { + EIGEN_DEVICE_FUNC static inline NewType run(const OldType& x) { return static_cast(x); } +}; + +template +struct cast_impl { + EIGEN_DEVICE_FUNC static inline bool run(const OldType& x) { return x != OldType(0); } +}; + +// Casting from S -> Complex leads to an implicit conversion from S to T, +// generating warnings on clang. Here we explicitly cast the real component. +template +struct cast_impl::IsComplex && NumTraits::IsComplex>> { + EIGEN_DEVICE_FUNC static inline NewType run(const OldType& x) { + typedef typename NumTraits::Real NewReal; + return static_cast(static_cast(x)); + } +}; + +// here, for once, we're plainly returning NewType: we don't want cast to do weird things. + +template +EIGEN_DEVICE_FUNC inline NewType cast(const OldType& x) { + return cast_impl::run(x); +} + +/**************************************************************************** + * Implementation of arg * + ****************************************************************************/ + +// Visual Studio 2017 has a bug where arg(float) returns 0 for negative inputs. +// This seems to be fixed in VS 2019. +#if (!EIGEN_COMP_MSVC || EIGEN_COMP_MSVC >= 1920) +// std::arg is only defined for types of std::complex, or integer types or float/double/long double +template ::IsComplex || is_integral::value || + is_same::value || is_same::value || + is_same::value> +struct arg_default_impl; + +template +struct arg_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + // There is no official ::arg on device in CUDA/HIP, so we always need to use std::arg. + using std::arg; + return static_cast(arg(x)); + } +}; + +// Must be non-complex floating-point type (e.g. half/bfloat16). +template +struct arg_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + return (x < Scalar(0)) ? 
RealScalar(EIGEN_PI) : RealScalar(0); + } +}; +#else +template ::IsComplex> +struct arg_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + return (x < RealScalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0); + } +}; + +template +struct arg_default_impl { + typedef typename NumTraits::Real RealScalar; + EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { + EIGEN_USING_STD(arg); + return arg(x); + } +}; +#endif +template +struct arg_impl : arg_default_impl {}; + +template +struct arg_retval { + typedef typename NumTraits::Real type; +}; + +/**************************************************************************** + * Implementation of expm1 * + ****************************************************************************/ + +// This implementation is based on GSL Math's expm1. +namespace std_fallback { +// fallback expm1 implementation in case there is no expm1(Scalar) function in namespace of Scalar, +// or that there is no suitable std::expm1 function available. Implementation +// attributed to Kahan. See: http://www.plunk.org/~hatch/rightway.php. +template +EIGEN_DEVICE_FUNC inline Scalar expm1(const Scalar& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + typedef typename NumTraits::Real RealScalar; + + EIGEN_USING_STD(exp); + Scalar u = exp(x); + if (numext::equal_strict(u, Scalar(1))) { + return x; + } + Scalar um1 = u - RealScalar(1); + if (numext::equal_strict(um1, Scalar(-1))) { + return RealScalar(-1); + } + + EIGEN_USING_STD(log); + Scalar logu = log(u); + return numext::equal_strict(u, logu) ? u : (u - RealScalar(1)) * x / logu; +} +} // namespace std_fallback + +template +struct expm1_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + EIGEN_USING_STD(expm1); + return expm1(x); + } +}; + +template +struct expm1_retval { + typedef Scalar type; +}; + +/**************************************************************************** + * Implementation of log * + ****************************************************************************/ + +// Complex log defined in MathFunctionsImpl.h. +template +EIGEN_DEVICE_FUNC std::complex complex_log(const std::complex& z); + +template +struct log_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { + EIGEN_USING_STD(log); + return static_cast(log(x)); + } +}; + +template +struct log_impl> { + EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& z) { return complex_log(z); } +}; + +/**************************************************************************** + * Implementation of log1p * + ****************************************************************************/ + +namespace std_fallback { +// fallback log1p implementation in case there is no log1p(Scalar) function in namespace of Scalar, +// or that there is no suitable std::log1p function available +template +EIGEN_DEVICE_FUNC inline Scalar log1p(const Scalar& x) { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + typedef typename NumTraits::Real RealScalar; + EIGEN_USING_STD(log); + Scalar x1p = RealScalar(1) + x; + Scalar log_1p = log_impl::run(x1p); + const bool is_small = numext::equal_strict(x1p, Scalar(1)); + const bool is_inf = numext::equal_strict(x1p, log_1p); + return (is_small || is_inf) ? 
x : x * (log_1p / (x1p - RealScalar(1))); +} +} // namespace std_fallback + +template +struct log1p_impl { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { + EIGEN_USING_STD(log1p); + return log1p(x); + } +}; + +// Specialization for complex types that are not supported by std::log1p. +template +struct log1p_impl> { + EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar) + + EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& x) { + return std_fallback::log1p(x); + } +}; + +template +struct log1p_retval { + typedef Scalar type; +}; + +/**************************************************************************** + * Implementation of pow * + ****************************************************************************/ + +template ::IsInteger && NumTraits::IsInteger> +struct pow_impl { + // typedef Scalar retval; + typedef typename ScalarBinaryOpTraits>::ReturnType + result_type; + static EIGEN_DEVICE_FUNC inline result_type run(const ScalarX& x, const ScalarY& y) { + EIGEN_USING_STD(pow); + return pow(x, y); + } +}; + +template +struct pow_impl { + typedef ScalarX result_type; + static EIGEN_DEVICE_FUNC inline ScalarX run(ScalarX x, ScalarY y) { + ScalarX res(1); + eigen_assert(!NumTraits::IsSigned || y >= 0); + if (y & 1) res *= x; + y >>= 1; + while (y) { + x *= x; + if (y & 1) res *= x; + y >>= 1; + } + return res; + } +}; + +enum { meta_floor_log2_terminate, meta_floor_log2_move_up, meta_floor_log2_move_down, meta_floor_log2_bogus }; + +template +struct meta_floor_log2_selector { + enum { + middle = (lower + upper) / 2, + value = (upper <= lower + 1) ? int(meta_floor_log2_terminate) + : (n < (1 << middle)) ? int(meta_floor_log2_move_down) + : (n == 0) ? int(meta_floor_log2_bogus) + : int(meta_floor_log2_move_up) + }; +}; + +template ::value> +struct meta_floor_log2 {}; + +template +struct meta_floor_log2 { + enum { value = meta_floor_log2::middle>::value }; +}; + +template +struct meta_floor_log2 { + enum { value = meta_floor_log2::middle, upper>::value }; +}; + +template +struct meta_floor_log2 { + enum { value = (n >= ((unsigned int)(1) << (lower + 1))) ? lower + 1 : lower }; +}; + +template +struct meta_floor_log2 { + // no value, error at compile time +}; + +template +struct count_bits_impl { + static_assert(std::is_integral::value && std::is_unsigned::value, + "BitsType must be an unsigned integer"); + static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + int n = CHAR_BIT * sizeof(BitsType); + int shift = n / 2; + while (bits > 0 && shift > 0) { + BitsType y = bits >> shift; + if (y > 0) { + n -= shift; + bits = y; + } + shift /= 2; + } + if (shift == 0) { + --n; + } + return n; + } + + static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + int n = CHAR_BIT * sizeof(BitsType); + int shift = n / 2; + while (bits > 0 && shift > 0) { + BitsType y = bits << shift; + if (y > 0) { + n -= shift; + bits = y; + } + shift /= 2; + } + if (shift == 0) { + --n; + } + return n; + } +}; + +// Count leading zeros. +template +EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + return count_bits_impl::clz(bits); +} + +// Count trailing zeros. 
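+// For example, with a 32-bit unsigned input, clz(0x8u) == 28 and ctz(0x8u) == 3; both
+// functions return CHAR_BIT * sizeof(BitsType) when the input is zero.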
+template +EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + return count_bits_impl::ctz(bits); +} + +#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG + +template +struct count_bits_impl< + BitsType, std::enable_if_t::value && sizeof(BitsType) <= sizeof(unsigned int)>> { + static constexpr int kNumBits = static_cast(sizeof(BitsType) * CHAR_BIT); + static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + static constexpr int kLeadingBitsOffset = (sizeof(unsigned int) - sizeof(BitsType)) * CHAR_BIT; + return bits == 0 ? kNumBits : __builtin_clz(static_cast(bits)) - kLeadingBitsOffset; + } + + static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + return bits == 0 ? kNumBits : __builtin_ctz(static_cast(bits)); + } +}; + +template +struct count_bits_impl::value && sizeof(unsigned int) < sizeof(BitsType) && + sizeof(BitsType) <= sizeof(unsigned long)>> { + static constexpr int kNumBits = static_cast(sizeof(BitsType) * CHAR_BIT); + static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + static constexpr int kLeadingBitsOffset = (sizeof(unsigned long) - sizeof(BitsType)) * CHAR_BIT; + return bits == 0 ? kNumBits : __builtin_clzl(static_cast(bits)) - kLeadingBitsOffset; + } + + static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + return bits == 0 ? kNumBits : __builtin_ctzl(static_cast(bits)); + } +}; + +template +struct count_bits_impl::value && sizeof(unsigned long) < sizeof(BitsType) && + sizeof(BitsType) <= sizeof(unsigned long long)>> { + static constexpr int kNumBits = static_cast(sizeof(BitsType) * CHAR_BIT); + static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + static constexpr int kLeadingBitsOffset = (sizeof(unsigned long long) - sizeof(BitsType)) * CHAR_BIT; + return bits == 0 ? kNumBits : __builtin_clzll(static_cast(bits)) - kLeadingBitsOffset; + } + + static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + return bits == 0 ? kNumBits : __builtin_ctzll(static_cast(bits)); + } +}; + +#elif EIGEN_COMP_MSVC + +template +struct count_bits_impl< + BitsType, std::enable_if_t::value && sizeof(BitsType) <= sizeof(unsigned long)>> { + static constexpr int kNumBits = static_cast(sizeof(BitsType) * CHAR_BIT); + static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + unsigned long out; + _BitScanReverse(&out, static_cast(bits)); + return bits == 0 ? kNumBits : (kNumBits - 1) - static_cast(out); + } + + static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + unsigned long out; + _BitScanForward(&out, static_cast(bits)); + return bits == 0 ? kNumBits : static_cast(out); + } +}; + +#ifdef _WIN64 + +template +struct count_bits_impl::value && sizeof(unsigned long) < sizeof(BitsType) && + sizeof(BitsType) <= sizeof(__int64)>> { + static constexpr int kNumBits = static_cast(sizeof(BitsType) * CHAR_BIT); + static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) { + unsigned long out; + _BitScanReverse64(&out, static_cast(bits)); + return bits == 0 ? kNumBits : (kNumBits - 1) - static_cast(out); + } + + static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) { + unsigned long out; + _BitScanForward64(&out, static_cast(bits)); + return bits == 0 ? kNumBits : static_cast(out); + } +}; + +#endif // _WIN64 + +#endif // EIGEN_COMP_GNUC || EIGEN_COMP_CLANG + +template +struct log_2_impl { + static constexpr int kTotalBits = sizeof(BitsType) * CHAR_BIT; + static EIGEN_DEVICE_FUNC inline int run_ceil(const BitsType& x) { + const int n = kTotalBits - clz(x); + bool power_of_two = (x & (x - 1)) == 0; + return x == 0 ? 0 : power_of_two ? 
(n - 1) : n; + } + static EIGEN_DEVICE_FUNC inline int run_floor(const BitsType& x) { + const int n = kTotalBits - clz(x); + return x == 0 ? 0 : n - 1; + } +}; + +template +int log2_ceil(const BitsType& x) { + return log_2_impl::run_ceil(x); +} + +template +int log2_floor(const BitsType& x) { + return log_2_impl::run_floor(x); +} + +// Implementation of is* functions + +template +EIGEN_DEVICE_FUNC std::enable_if_t::has_infinity || std::numeric_limits::has_quiet_NaN || + std::numeric_limits::has_signaling_NaN), + bool> +isfinite_impl(const T&) { + return true; +} + +template +EIGEN_DEVICE_FUNC std::enable_if_t<(std::numeric_limits::has_infinity || std::numeric_limits::has_quiet_NaN || + std::numeric_limits::has_signaling_NaN) && + (!NumTraits::IsComplex), + bool> +isfinite_impl(const T& x) { + EIGEN_USING_STD(isfinite); + return isfinite EIGEN_NOT_A_MACRO(x); +} + +template +EIGEN_DEVICE_FUNC std::enable_if_t::has_infinity, bool> isinf_impl(const T&) { + return false; +} + +template +EIGEN_DEVICE_FUNC std::enable_if_t<(std::numeric_limits::has_infinity && !NumTraits::IsComplex), bool> isinf_impl( + const T& x) { + EIGEN_USING_STD(isinf); + return isinf EIGEN_NOT_A_MACRO(x); +} + +template +EIGEN_DEVICE_FUNC + std::enable_if_t::has_quiet_NaN || std::numeric_limits::has_signaling_NaN), bool> + isnan_impl(const T&) { + return false; +} + +template +EIGEN_DEVICE_FUNC std::enable_if_t< + (std::numeric_limits::has_quiet_NaN || std::numeric_limits::has_signaling_NaN) && (!NumTraits::IsComplex), + bool> +isnan_impl(const T& x) { + EIGEN_USING_STD(isnan); + return isnan EIGEN_NOT_A_MACRO(x); +} + +// The following overload are defined at the end of this file +template +EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex& x); +template +EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex& x); +template +EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex& x); +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS T ptanh_float(const T& a_x); + +/**************************************************************************** + * Implementation of sign * + ****************************************************************************/ +template ::IsComplex != 0), + bool IsInteger = (NumTraits::IsInteger != 0)> +struct sign_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& a) { return Scalar((a > Scalar(0)) - (a < Scalar(0))); } +}; + +template +struct sign_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& a) { + return (isnan_impl)(a) ? a : Scalar((a > Scalar(0)) - (a < Scalar(0))); + } +}; + +template +struct sign_impl { + EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& a) { + using real_type = typename NumTraits::Real; + EIGEN_USING_STD(abs); + real_type aa = abs(a); + if (aa == real_type(0)) return Scalar(0); + aa = real_type(1) / aa; + return Scalar(a.real() * aa, a.imag() * aa); + } +}; + +// The sign function for bool is the identity. 
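+// (sign(false) == false and sign(true) == true, so run() simply returns its argument.)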
+template <> +struct sign_impl { + EIGEN_DEVICE_FUNC static inline bool run(const bool& a) { return a; } +}; + +template +struct sign_retval { + typedef Scalar type; +}; + +// suppress "unary minus operator applied to unsigned type, result still unsigned" warnings on MSVC +// note: `0 - a` is distinct from `-a` when Scalar is a floating point type and `a` is zero + +template ::IsInteger> +struct negate_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar run(const Scalar& a) { return -a; } +}; + +template +struct negate_impl { + EIGEN_STATIC_ASSERT((!is_same::value), NEGATE IS NOT DEFINED FOR BOOLEAN TYPES) + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar run(const Scalar& a) { return Scalar(0) - a; } +}; + +template +struct negate_retval { + typedef Scalar type; +}; + +template ::type>::IsInteger> +struct nearest_integer_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_floor(const Scalar& x) { + EIGEN_USING_STD(floor) return floor(x); + } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_ceil(const Scalar& x) { + EIGEN_USING_STD(ceil) return ceil(x); + } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_rint(const Scalar& x) { + EIGEN_USING_STD(rint) return rint(x); + } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_round(const Scalar& x) { + EIGEN_USING_STD(round) return round(x); + } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_trunc(const Scalar& x) { + EIGEN_USING_STD(trunc) return trunc(x); + } +}; +template +struct nearest_integer_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_floor(const Scalar& x) { return x; } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_ceil(const Scalar& x) { return x; } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_rint(const Scalar& x) { return x; } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_round(const Scalar& x) { return x; } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run_trunc(const Scalar& x) { return x; } +}; + +} // end namespace internal + +/**************************************************************************** + * Generic math functions * + ****************************************************************************/ + +namespace numext { + +#if (!defined(EIGEN_GPUCC) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)) +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) { + EIGEN_USING_STD(min) + return min EIGEN_NOT_A_MACRO(x, y); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) { + EIGEN_USING_STD(max) + return max EIGEN_NOT_A_MACRO(x, y); +} +#else +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) { + return y < x ? y : x; +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y) { + return fminf(x, y); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double mini(const double& x, const double& y) { + return fmin(x, y); +} + +#ifndef EIGEN_GPU_COMPILE_PHASE +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y) { +#if defined(EIGEN_HIPCC) + // no "fminl" on HIP yet + return (x < y) ? x : y; +#else + return fminl(x, y); +#endif +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) { + return x < y ? 
y : x; +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y) { + return fmaxf(x, y); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y) { + return fmax(x, y); +} +#ifndef EIGEN_GPU_COMPILE_PHASE +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y) { +#if defined(EIGEN_HIPCC) + // no "fmaxl" on HIP yet + return (x > y) ? x : y; +#else + return fmaxl(x, y); +#endif +} +#endif +#endif + +#if defined(SYCL_DEVICE_ONLY) + +#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_long) +#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_long) +#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong) +#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong) +#define SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) +#define SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) +#define SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(NAME, FUNC) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \ + SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_double) +#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(NAME, FUNC) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \ + SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_double) +#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(NAME, FUNC, RET_TYPE) \ + SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_float) \ + SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_double) + +#define SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE& x) { \ + return cl::sycl::FUNC(x); \ + } + +#define SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, TYPE) SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, TYPE, TYPE) + +#define SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE1& x, const ARG_TYPE2& y) { \ + return cl::sycl::FUNC(x, y); \ + } + +#define SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \ + SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE, ARG_TYPE) + +#define 
SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, TYPE) SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, TYPE, TYPE) + +SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(mini, min) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(mini, fmin) +SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(maxi, max) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(maxi, fmax) + +#endif + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline internal::add_const_on_value_type_t real_ref( + const Scalar& x) { + return internal::real_ref_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) { + return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(arg, Scalar) arg(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(arg, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline internal::add_const_on_value_type_t imag_ref( + const Scalar& x) { + return internal::imag_ref_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) { + return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(sign, Scalar) sign(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(sign, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(negate, Scalar) negate(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(negate, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); +} + +EIGEN_DEVICE_FUNC inline bool abs2(bool x) { return x; } + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T absdiff(const T& x, const T& y) { + return x > y ? x - y : y - x; +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float absdiff(const float& x, const float& y) { + return fabsf(x - y); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double absdiff(const double& x, const double& y) { + return fabs(x - y); +} + +// HIP and CUDA do not support long double. 
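+// The long double overload below is therefore compiled only on the host side.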
+#ifndef EIGEN_GPU_COMPILE_PHASE +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double absdiff(const long double& x, const long double& y) { + return fabsl(x - y); +} +#endif + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) { + return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(hypot, hypot) +#endif + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log1p, log1p) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log1p(const float& x) { + return ::log1pf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double log1p(const double& x) { + return ::log1p(x); +} +#endif + +template +EIGEN_DEVICE_FUNC inline typename internal::pow_impl::result_type pow(const ScalarX& x, + const ScalarY& y) { + return internal::pow_impl::run(x, y); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(pow, pow) +#endif + +template +EIGEN_DEVICE_FUNC bool(isnan)(const T& x) { + return internal::isnan_impl(x); +} +template +EIGEN_DEVICE_FUNC bool(isinf)(const T& x) { + return internal::isinf_impl(x); +} +template +EIGEN_DEVICE_FUNC bool(isfinite)(const T& x) { + return internal::isfinite_impl(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isnan, isnan, bool) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isinf, isinf, bool) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isfinite, isfinite, bool) +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar rint(const Scalar& x) { + return internal::nearest_integer_impl::run_rint(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar round(const Scalar& x) { + return internal::nearest_integer_impl::run_round(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar(floor)(const Scalar& x) { + return internal::nearest_integer_impl::run_floor(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar(ceil)(const Scalar& x) { + return internal::nearest_integer_impl::run_ceil(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar(trunc)(const Scalar& x) { + return internal::nearest_integer_impl::run_trunc(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(round, round) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(floor, floor) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(ceil, ceil) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(trunc, trunc) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float floor(const float& x) { + return ::floorf(x); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double floor(const double& x) { + return ::floor(x); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float ceil(const float& x) { + return ::ceilf(x); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double ceil(const double& x) { + return ::ceil(x); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float trunc(const float& x) { + return ::truncf(x); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double trunc(const double& x) { + return ::trunc(x); +} +#endif + +// 
Integer division with rounding up. +// T is assumed to be an integer type with a>=0, and b>0 +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE EIGEN_CONSTEXPR T div_ceil(T a, T b) { + using UnsignedT = typename internal::make_unsigned::type; + EIGEN_STATIC_ASSERT((NumTraits::IsInteger), THIS FUNCTION IS FOR INTEGER TYPES) + eigen_assert(a >= 0); + eigen_assert(b > 0); + // Note: explicitly declaring a and b as non-negative values allows the compiler to use better optimizations + const UnsignedT ua = UnsignedT(a); + const UnsignedT ub = UnsignedT(b); + // Note: This form is used because it cannot overflow. + return ua == 0 ? 0 : (ua - 1) / ub + 1; +} + +// Integer round down to nearest power of b +// T is assumed to be an integer type with a>=0, and b>0 +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE EIGEN_CONSTEXPR T round_down(T a, U b) { + using UnsignedT = typename internal::make_unsigned::type; + using UnsignedU = typename internal::make_unsigned::type; + EIGEN_STATIC_ASSERT((NumTraits::IsInteger), THIS FUNCTION IS FOR INTEGER TYPES) + EIGEN_STATIC_ASSERT((NumTraits::IsInteger), THIS FUNCTION IS FOR INTEGER TYPES) + eigen_assert(a >= 0); + eigen_assert(b > 0); + // Note: explicitly declaring a and b as non-negative values allows the compiler to use better optimizations + const UnsignedT ua = UnsignedT(a); + const UnsignedU ub = UnsignedU(b); + return ub * (ua / ub); +} + +/** Log base 2 for 32 bits positive integers. + * Conveniently returns 0 for x==0. */ +EIGEN_CONSTEXPR inline int log2(int x) { + eigen_assert(x >= 0); + unsigned int v(x); + constexpr int table[32] = {0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31}; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return table[(v * 0x07C4ACDDU) >> 27]; +} + +/** \returns the square root of \a x. + * + * It is essentially equivalent to + * \code using std::sqrt; return sqrt(x); \endcode + * but slightly faster for float/double and some compilers (e.g., gcc), thanks to + * specializations when SSE is enabled. + * + * It's usage is justified in performance critical functions, like norm/normalize. + */ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x); +} + +// Boolean specialization, avoids implicit float to bool conversion (-Wimplicit-conversion-floating-point-to-bool). +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC bool sqrt(const bool& x) { + return x; +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sqrt, sqrt) +#endif + +/** \returns the cube root of \a x. **/ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cbrt(const T& x) { + EIGEN_USING_STD(cbrt); + return static_cast(cbrt(x)); +} + +/** \returns the reciprocal square root of \a x. 
**/ +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T rsqrt(const T& x) { + return internal::rsqrt_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T log(const T& x) { + return internal::log_impl::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log, log) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log(const float& x) { + return ::logf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double log(const double& x) { + return ::log(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + std::enable_if_t::IsSigned || NumTraits::IsComplex, typename NumTraits::Real> + abs(const T& x) { + EIGEN_USING_STD(abs); + return abs(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + std::enable_if_t::IsSigned || NumTraits::IsComplex), typename NumTraits::Real> + abs(const T& x) { + return x; +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(abs, abs) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(abs, fabs) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const float& x) { + return ::fabsf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double abs(const double& x) { + return ::fabs(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const std::complex& x) { + return ::hypotf(x.real(), x.imag()); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double abs(const std::complex& x) { + return ::hypot(x.real(), x.imag()); +} +#endif + +template ::IsInteger, bool IsSigned = NumTraits::IsSigned> +struct signbit_impl; +template +struct signbit_impl { + static constexpr size_t Size = sizeof(Scalar); + static constexpr size_t Shift = (CHAR_BIT * Size) - 1; + using intSize_t = typename get_integer_by_size::signed_type; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static Scalar run(const Scalar& x) { + intSize_t a = bit_cast(x); + a = a >> Shift; + Scalar result = bit_cast(a); + return result; + } +}; +template +struct signbit_impl { + static constexpr size_t Size = sizeof(Scalar); + static constexpr size_t Shift = (CHAR_BIT * Size) - 1; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Scalar run(const Scalar& x) { return x >> Shift; } +}; +template +struct signbit_impl { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Scalar run(const Scalar&) { return Scalar(0); } +}; +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Scalar signbit(const Scalar& x) { + return signbit_impl::run(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T exp(const T& x) { + EIGEN_USING_STD(exp); + return exp(x); +} + +// MSVC screws up some edge-cases for std::exp(complex). +#ifdef EIGEN_COMP_MSVC +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp(const std::complex& x) { + EIGEN_USING_STD(exp); + // If z is (x,±∞) (for any finite x), the result is (NaN,NaN) and FE_INVALID is raised. + // If z is (x,NaN) (for any finite x), the result is (NaN,NaN) and FE_INVALID may be raised. 
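+  // The two checks below handle those families of inputs explicitly before deferring to std::exp.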
+ if ((isfinite)(real_ref(x)) && !(isfinite)(imag_ref(x))) { + return std::complex(NumTraits::quiet_NaN(), NumTraits::quiet_NaN()); + } + // If z is (+∞,±∞), the result is (±∞,NaN) and FE_INVALID is raised (the sign of the real part is unspecified) + // If z is (+∞,NaN), the result is (±∞,NaN) (the sign of the real part is unspecified) + if ((real_ref(x) == NumTraits::infinity() && !(isfinite)(imag_ref(x)))) { + return std::complex(NumTraits::infinity(), NumTraits::quiet_NaN()); + } + return exp(x); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(exp, exp) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float exp(const float& x) { + return ::expf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double exp(const double& x) { + return ::exp(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp(const std::complex& x) { + float com = ::expf(x.real()); + float res_real = com * ::cosf(x.imag()); + float res_imag = com * ::sinf(x.imag()); + return std::complex(res_real, res_imag); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp(const std::complex& x) { + double com = ::exp(x.real()); + double res_real = com * ::cos(x.imag()); + double res_imag = com * ::sin(x.imag()); + return std::complex(res_real, res_imag); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T exp2(const T& x) { + EIGEN_USING_STD(exp2); + return exp2(x); +} + +// MSVC screws up some edge-cases for std::exp2(complex). +#ifdef EIGEN_COMP_MSVC +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp2(const std::complex& x) { + EIGEN_USING_STD(exp); + // If z is (x,±∞) (for any finite x), the result is (NaN,NaN) and FE_INVALID is raised. + // If z is (x,NaN) (for any finite x), the result is (NaN,NaN) and FE_INVALID may be raised. 
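+  // Same special-case handling as in exp(std::complex<T>) above, adapted for base 2.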
+ if ((isfinite)(real_ref(x)) && !(isfinite)(imag_ref(x))) { + return std::complex(NumTraits::quiet_NaN(), NumTraits::quiet_NaN()); + } + // If z is (+∞,±∞), the result is (±∞,NaN) and FE_INVALID is raised (the sign of the real part is unspecified) + // If z is (+∞,NaN), the result is (±∞,NaN) (the sign of the real part is unspecified) + if ((real_ref(x) == NumTraits::infinity() && !(isfinite)(imag_ref(x)))) { + return std::complex(NumTraits::infinity(), NumTraits::quiet_NaN()); + } + return exp2(x); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(exp2, exp2) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float exp2(const float& x) { + return ::exp2f(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double exp2(const double& x) { + return ::exp2(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp2(const std::complex& x) { + float com = ::exp2f(x.real()); + float res_real = com * ::cosf(static_cast(EIGEN_LN2) * x.imag()); + float res_imag = com * ::sinf(static_cast(EIGEN_LN2) * x.imag()); + return std::complex(res_real, res_imag); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp2(const std::complex& x) { + double com = ::exp2(x.real()); + double res_real = com * ::cos(static_cast(EIGEN_LN2) * x.imag()); + double res_imag = com * ::sin(static_cast(EIGEN_LN2) * x.imag()); + return std::complex(res_real, res_imag); +} +#endif + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(expm1, Scalar) expm1(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(expm1, Scalar)::run(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(expm1, expm1) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float expm1(const float& x) { + return ::expm1f(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double expm1(const double& x) { + return ::expm1(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cos(const T& x) { + EIGEN_USING_STD(cos); + return cos(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cos, cos) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cos(const float& x) { + return ::cosf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double cos(const double& x) { + return ::cos(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sin(const T& x) { + EIGEN_USING_STD(sin); + return sin(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sin, sin) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sin(const float& x) { + return ::sinf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sin(const double& x) { + return ::sin(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T tan(const T& x) { + EIGEN_USING_STD(tan); + return tan(x); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tan, tan) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tan(const float& x) { + return ::tanf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double tan(const double& x) { + return ::tan(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T acos(const T& x) { + EIGEN_USING_STD(acos); + return acos(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T acosh(const T& x) { + EIGEN_USING_STD(acosh); + 
return static_cast(acosh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acos, acos) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acosh, acosh) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float acos(const float& x) { + return ::acosf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double acos(const double& x) { + return ::acos(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T asin(const T& x) { + EIGEN_USING_STD(asin); + return asin(x); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T asinh(const T& x) { + EIGEN_USING_STD(asinh); + return static_cast(asinh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asin, asin) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asinh, asinh) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float asin(const float& x) { + return ::asinf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double asin(const double& x) { + return ::asin(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T atan(const T& x) { + EIGEN_USING_STD(atan); + return static_cast(atan(x)); +} + +template ::IsComplex, int> = 0> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T atan2(const T& y, const T& x) { + EIGEN_USING_STD(atan2); + return static_cast(atan2(y, x)); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T atanh(const T& x) { + EIGEN_USING_STD(atanh); + return static_cast(atanh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atan, atan) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atanh, atanh) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float atan(const float& x) { + return ::atanf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double atan(const double& x) { + return ::atan(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cosh(const T& x) { + EIGEN_USING_STD(cosh); + return static_cast(cosh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cosh, cosh) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cosh(const float& x) { + return ::coshf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double cosh(const double& x) { + return ::cosh(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sinh(const T& x) { + EIGEN_USING_STD(sinh); + return static_cast(sinh(x)); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sinh, sinh) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sinh(const float& x) { + return ::sinhf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sinh(const double& x) { + return ::sinh(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T tanh(const T& x) { + EIGEN_USING_STD(tanh); + return tanh(x); +} + +#if (!defined(EIGEN_GPUCC)) && EIGEN_FAST_MATH && !defined(SYCL_DEVICE_ONLY) +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(float x) { return internal::ptanh_float(x); } +#endif + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tanh, tanh) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(const float& x) { + return ::tanhf(x); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double tanh(const double& x) { + return ::tanh(x); +} +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T fmod(const T& a, 
const T& b) { + EIGEN_USING_STD(fmod); + return fmod(a, b); +} + +#if defined(SYCL_DEVICE_ONLY) +SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(fmod, fmod) +#endif + +#if defined(EIGEN_GPUCC) +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float fmod(const float& a, const float& b) { + return ::fmodf(a, b); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double fmod(const double& a, const double& b) { + return ::fmod(a, b); +} +#endif + +#if defined(SYCL_DEVICE_ONLY) +#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY +#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY +#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY +#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY +#undef SYCL_SPECIALIZE_INTEGER_TYPES_BINARY +#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY +#undef SYCL_SPECIALIZE_FLOATING_TYPES_BINARY +#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY +#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE +#undef SYCL_SPECIALIZE_GEN_UNARY_FUNC +#undef SYCL_SPECIALIZE_UNARY_FUNC +#undef SYCL_SPECIALIZE_GEN1_BINARY_FUNC +#undef SYCL_SPECIALIZE_GEN2_BINARY_FUNC +#undef SYCL_SPECIALIZE_BINARY_FUNC +#endif + +template ::value>> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar logical_shift_left(const Scalar& a, int n) { + return a << n; +} + +template ::value>> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar logical_shift_right(const Scalar& a, int n) { + using UnsignedScalar = typename numext::get_integer_by_size::unsigned_type; + return bit_cast(bit_cast(a) >> n); +} + +template ::value>> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar arithmetic_shift_right(const Scalar& a, int n) { + using SignedScalar = typename numext::get_integer_by_size::signed_type; + return bit_cast(bit_cast(a) >> n); +} + +} // end namespace numext + +namespace internal { + +template +EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex& x) { + return (numext::isfinite)(numext::real(x)) && (numext::isfinite)(numext::imag(x)); +} + +template +EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex& x) { + return (numext::isnan)(numext::real(x)) || (numext::isnan)(numext::imag(x)); +} + +template +EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex& x) { + return ((numext::isinf)(numext::real(x)) || (numext::isinf)(numext::imag(x))) && (!(numext::isnan)(x)); +} + +/**************************************************************************** + * Implementation of fuzzy comparisons * + ****************************************************************************/ + +template +struct scalar_fuzzy_default_impl {}; + +template +struct scalar_fuzzy_default_impl { + typedef typename NumTraits::Real RealScalar; + template + EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, + const RealScalar& prec) { + return numext::abs(x) <= numext::abs(y) * prec; + } + EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { + return numext::abs(x - y) <= numext::mini(numext::abs(x), numext::abs(y)) * prec; + } + EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) { + return x <= y || isApprox(x, y, prec); + } +}; + +template +struct scalar_fuzzy_default_impl { + typedef typename NumTraits::Real RealScalar; + template + EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) { + return x == Scalar(0); + } + EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) { return x == y; } + 
EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) { + return x <= y; + } +}; + +template +struct scalar_fuzzy_default_impl { + typedef typename NumTraits::Real RealScalar; + template + EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, + const RealScalar& prec) { + return numext::abs2(x) <= numext::abs2(y) * prec * prec; + } + EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { + return numext::abs2(x - y) <= numext::mini(numext::abs2(x), numext::abs2(y)) * prec * prec; + } +}; + +template +struct scalar_fuzzy_impl + : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; + +template +EIGEN_DEVICE_FUNC inline bool isMuchSmallerThan( + const Scalar& x, const OtherScalar& y, + const typename NumTraits::Real& precision = NumTraits::dummy_precision()) { + return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); +} + +template +EIGEN_DEVICE_FUNC inline bool isApprox( + const Scalar& x, const Scalar& y, + const typename NumTraits::Real& precision = NumTraits::dummy_precision()) { + return scalar_fuzzy_impl::isApprox(x, y, precision); +} + +template +EIGEN_DEVICE_FUNC inline bool isApproxOrLessThan( + const Scalar& x, const Scalar& y, + const typename NumTraits::Real& precision = NumTraits::dummy_precision()) { + return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); +} + +/****************************************** +*** The special case of the bool type *** +******************************************/ + +template <> +struct scalar_fuzzy_impl { + typedef bool RealScalar; + + template + EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) { + return !x; + } + + EIGEN_DEVICE_FUNC static inline bool isApprox(bool x, bool y, bool) { return x == y; } + + EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) { + return (!x) || y; + } +}; + +} // end namespace internal + +// Default implementations that rely on other numext implementations +namespace internal { + +// Specialization for complex types that are not supported by std::expm1. +template +struct expm1_impl> { + EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar) + + EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& x) { + RealScalar xr = x.real(); + RealScalar xi = x.imag(); + // expm1(z) = exp(z) - 1 + // = exp(x + i * y) - 1 + // = exp(x) * (cos(y) + i * sin(y)) - 1 + // = exp(x) * cos(y) - 1 + i * exp(x) * sin(y) + // Imag(expm1(z)) = exp(x) * sin(y) + // Real(expm1(z)) = exp(x) * cos(y) - 1 + // = exp(x) * cos(y) - 1. + // = expm1(x) + exp(x) * (cos(y) - 1) + // = expm1(x) + exp(x) * (2 * sin(y / 2) ** 2) + RealScalar erm1 = numext::expm1(xr); + RealScalar er = erm1 + RealScalar(1.); + RealScalar sin2 = numext::sin(xi / RealScalar(2.)); + sin2 = sin2 * sin2; + RealScalar s = numext::sin(xi); + RealScalar real_part = erm1 - RealScalar(2.) 
* er * sin2; + return std::complex(real_part, er * s); + } +}; + +template +struct rsqrt_impl { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE T run(const T& x) { return T(1) / numext::sqrt(x); } +}; + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template +struct conj_impl, true> { + EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& x) { + return std::complex(numext::real(x), -numext::imag(x)); + } +}; +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATHFUNCTIONS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MathFunctionsImpl.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MathFunctionsImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..10ddabd7952162c17ab4920277157bb66c486f52 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MathFunctionsImpl.h @@ -0,0 +1,262 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATHFUNCTIONSIMPL_H +#define EIGEN_MATHFUNCTIONSIMPL_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +/** \internal Fast reciprocal using Newton-Raphson's method. + + Preconditions: + 1. The starting guess provided in approx_a_recip must have at least half + the leading mantissa bits in the correct result, such that a single + Newton-Raphson step is sufficient to get within 1-2 ulps of the correct + result. + 2. If a is zero, approx_a_recip must be infinite with the same sign as a. + 3. If a is infinite, approx_a_recip must be zero with the same sign as a. + + If the preconditions are satisfied, which they are for for the _*_rcp_ps + instructions on x86, the result has a maximum relative error of 2 ulps, + and correctly handles reciprocals of zero, infinity, and NaN. +*/ +template +struct generic_reciprocal_newton_step { + static_assert(Steps > 0, "Steps must be at least 1."); + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_a_recip) { + using Scalar = typename unpacket_traits::type; + const Packet two = pset1(Scalar(2)); + // Refine the approximation using one Newton-Raphson step: + // x_{i} = x_{i-1} * (2 - a * x_{i-1}) + const Packet x = generic_reciprocal_newton_step::run(a, approx_a_recip); + const Packet tmp = pnmadd(a, x, two); + // If tmp is NaN, it means that a is either +/-0 or +/-Inf. + // In this case return the approximation directly. + const Packet is_not_nan = pcmp_eq(tmp, tmp); + return pselect(is_not_nan, pmul(x, tmp), x); + } +}; + +template +struct generic_reciprocal_newton_step { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& /*unused*/, const Packet& approx_rsqrt) { + return approx_rsqrt; + } +}; + +/** \internal Fast reciprocal sqrt using Newton-Raphson's method. + + Preconditions: + 1. The starting guess provided in approx_a_recip must have at least half + the leading mantissa bits in the correct result, such that a single + Newton-Raphson step is sufficient to get within 1-2 ulps of the correct + result. + 2. If a is zero, approx_a_recip must be infinite with the same sign as a. + 3. 
If a is infinite, approx_a_recip must be zero with the same sign as a. + + If the preconditions are satisfied, which they are for for the _*_rcp_ps + instructions on x86, the result has a maximum relative error of 2 ulps, + and correctly handles zero, infinity, and NaN. Positive denormals are + treated as zero. +*/ +template +struct generic_rsqrt_newton_step { + static_assert(Steps > 0, "Steps must be at least 1."); + using Scalar = typename unpacket_traits::type; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_rsqrt) { + constexpr Scalar kMinusHalf = Scalar(-1) / Scalar(2); + const Packet cst_minus_half = pset1(kMinusHalf); + const Packet cst_minus_one = pset1(Scalar(-1)); + + Packet inv_sqrt = approx_rsqrt; + for (int step = 0; step < Steps; ++step) { + // Refine the approximation using one Newton-Raphson step: + // h_n = (x * inv_sqrt) * inv_sqrt - 1 (so that h_n is nearly 0). + // inv_sqrt = inv_sqrt - 0.5 * inv_sqrt * h_n + Packet r2 = pmul(a, inv_sqrt); + Packet half_r = pmul(inv_sqrt, cst_minus_half); + Packet h_n = pmadd(r2, inv_sqrt, cst_minus_one); + inv_sqrt = pmadd(half_r, h_n, inv_sqrt); + } + + // If x is NaN, then either: + // 1) the input is NaN + // 2) zero and infinity were multiplied + // In either of these cases, return approx_rsqrt + return pselect(pisnan(inv_sqrt), approx_rsqrt, inv_sqrt); + } +}; + +template +struct generic_rsqrt_newton_step { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& /*unused*/, const Packet& approx_rsqrt) { + return approx_rsqrt; + } +}; + +/** \internal Fast sqrt using Newton-Raphson's method. + + Preconditions: + 1. The starting guess for the reciprocal sqrt provided in approx_rsqrt must + have at least half the leading mantissa bits in the correct result, such + that a single Newton-Raphson step is sufficient to get within 1-2 ulps of + the correct result. + 2. If a is zero, approx_rsqrt must be infinite. + 3. If a is infinite, approx_rsqrt must be zero. + + If the preconditions are satisfied, which they are for for the _*_rsqrt_ps + instructions on x86, the result has a maximum relative error of 2 ulps, + and correctly handles zero and infinity, and NaN. Positive denormal inputs + are treated as zero. +*/ +template +struct generic_sqrt_newton_step { + static_assert(Steps > 0, "Steps must be at least 1."); + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_rsqrt) { + using Scalar = typename unpacket_traits::type; + const Packet one_point_five = pset1(Scalar(1.5)); + const Packet minus_half = pset1(Scalar(-0.5)); + // If a is inf or zero, return a directly. + const Packet inf_mask = pcmp_eq(a, pset1(NumTraits::infinity())); + const Packet return_a = por(pcmp_eq(a, pzero(a)), inf_mask); + // Do a single step of Newton's iteration for reciprocal square root: + // x_{n+1} = x_n * (1.5 + (-0.5 * x_n) * (a * x_n))). + // The Newton's step is computed this way to avoid over/under-flows. + Packet rsqrt = pmul(approx_rsqrt, pmadd(pmul(minus_half, approx_rsqrt), pmul(a, approx_rsqrt), one_point_five)); + for (int step = 1; step < Steps; ++step) { + rsqrt = pmul(rsqrt, pmadd(pmul(minus_half, rsqrt), pmul(a, rsqrt), one_point_five)); + } + + // Return sqrt(x) = x * rsqrt(x) for non-zero finite positive arguments. + // Return a itself for 0 or +inf, NaN for negative arguments. 
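 + // (Illustrative note: with the ~12-bit accurate x86 rsqrt approximation as + // the starting guess, each Newton-Raphson step roughly doubles the number + // of correct bits, so Steps == 1 already suffices for float precision.) 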
+ return pselect(return_a, a, pmul(a, rsqrt)); + } +}; + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y) { + // IEEE IEC 6059 special cases. + if ((numext::isinf)(x) || (numext::isinf)(y)) return NumTraits::infinity(); + if ((numext::isnan)(x) || (numext::isnan)(y)) return NumTraits::quiet_NaN(); + + EIGEN_USING_STD(sqrt); + RealScalar p, qp; + p = numext::maxi(x, y); + if (numext::is_exactly_zero(p)) return RealScalar(0); + qp = numext::mini(y, x) / p; + return p * sqrt(RealScalar(1) + qp * qp); +} + +template +struct hypot_impl { + typedef typename NumTraits::Real RealScalar; + static EIGEN_DEVICE_FUNC inline RealScalar run(const Scalar& x, const Scalar& y) { + EIGEN_USING_STD(abs); + return positive_real_hypot(abs(x), abs(y)); + } +}; + +// Generic complex sqrt implementation that correctly handles corner cases +// according to https://en.cppreference.com/w/cpp/numeric/complex/sqrt +template +EIGEN_DEVICE_FUNC std::complex complex_sqrt(const std::complex& z) { + // Computes the principal sqrt of the input. + // + // For a complex square root of the number x + i*y. We want to find real + // numbers u and v such that + // (u + i*v)^2 = x + i*y <=> + // u^2 - v^2 + i*2*u*v = x + i*v. + // By equating the real and imaginary parts we get: + // u^2 - v^2 = x + // 2*u*v = y. + // + // For x >= 0, this has the numerically stable solution + // u = sqrt(0.5 * (x + sqrt(x^2 + y^2))) + // v = y / (2 * u) + // and for x < 0, + // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2))) + // u = y / (2 * v) + // + // Letting w = sqrt(0.5 * (|x| + |z|)), + // if x == 0: u = w, v = sign(y) * w + // if x > 0: u = w, v = y / (2 * w) + // if x < 0: u = |y| / (2 * w), v = sign(y) * w + + const T x = numext::real(z); + const T y = numext::imag(z); + const T zero = T(0); + const T w = numext::sqrt(T(0.5) * (numext::abs(x) + numext::hypot(x, y))); + + return (numext::isinf)(y) ? std::complex(NumTraits::infinity(), y) + : numext::is_exactly_zero(x) ? std::complex(w, y < zero ? -w : w) + : x > zero ? std::complex(w, y / (2 * w)) + : std::complex(numext::abs(y) / (2 * w), y < zero ? -w : w); +} + +// Generic complex rsqrt implementation. +template +EIGEN_DEVICE_FUNC std::complex complex_rsqrt(const std::complex& z) { + // Computes the principal reciprocal sqrt of the input. + // + // For a complex reciprocal square root of the number z = x + i*y. We want to + // find real numbers u and v such that + // (u + i*v)^2 = 1 / (x + i*y) <=> + // u^2 - v^2 + i*2*u*v = x/|z|^2 - i*v/|z|^2. + // By equating the real and imaginary parts we get: + // u^2 - v^2 = x/|z|^2 + // 2*u*v = y/|z|^2. + // + // For x >= 0, this has the numerically stable solution + // u = sqrt(0.5 * (x + |z|)) / |z| + // v = -y / (2 * u * |z|) + // and for x < 0, + // v = -sign(y) * sqrt(0.5 * (-x + |z|)) / |z| + // u = -y / (2 * v * |z|) + // + // Letting w = sqrt(0.5 * (|x| + |z|)), + // if x == 0: u = w / |z|, v = -sign(y) * w / |z| + // if x > 0: u = w / |z|, v = -y / (2 * w * |z|) + // if x < 0: u = |y| / (2 * w * |z|), v = -sign(y) * w / |z| + + const T x = numext::real(z); + const T y = numext::imag(z); + const T zero = T(0); + + const T abs_z = numext::hypot(x, y); + const T w = numext::sqrt(T(0.5) * (numext::abs(x) + abs_z)); + const T woz = w / abs_z; + // Corner cases consistent with 1/sqrt(z) on gcc/clang. + return numext::is_exactly_zero(abs_z) ? std::complex(NumTraits::infinity(), NumTraits::quiet_NaN()) + : ((numext::isinf)(x) || (numext::isinf)(y)) ? 
std::complex(zero, zero) + : numext::is_exactly_zero(x) ? std::complex(woz, y < zero ? woz : -woz) + : x > zero ? std::complex(woz, -y / (2 * w * abs_z)) + : std::complex(numext::abs(y) / (2 * w * abs_z), y < zero ? woz : -woz); +} + +template +EIGEN_DEVICE_FUNC std::complex complex_log(const std::complex& z) { + // Computes complex log. + T a = numext::abs(z); + EIGEN_USING_STD(atan2); + T b = atan2(z.imag(), z.real()); + return std::complex(numext::log(a), b); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATHFUNCTIONSIMPL_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Matrix.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..1ea1a664d310145e25acb0598c221b1dce23528d --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Matrix.h @@ -0,0 +1,527 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_H +#define EIGEN_MATRIX_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits> { + private: + constexpr static int size = internal::size_at_compile_time(Rows_, Cols_); + typedef typename find_best_packet::type PacketScalar; + enum { + row_major_bit = Options_ & RowMajor ? RowMajorBit : 0, + is_dynamic_size_storage = MaxRows_ == Dynamic || MaxCols_ == Dynamic, + max_size = is_dynamic_size_storage ? Dynamic : MaxRows_ * MaxCols_, + default_alignment = compute_default_alignment::value, + actual_alignment = ((Options_ & DontAlign) == 0) ? default_alignment : 0, + required_alignment = unpacket_traits::alignment, + packet_access_bit = (packet_traits::Vectorizable && + (EIGEN_UNALIGNED_VECTORIZE || (int(actual_alignment) >= int(required_alignment)))) + ? PacketAccessBit + : 0 + }; + + public: + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef Eigen::Index StorageIndex; + typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = Rows_, + ColsAtCompileTime = Cols_, + MaxRowsAtCompileTime = MaxRows_, + MaxColsAtCompileTime = MaxCols_, + Flags = compute_matrix_flags(Options_), + Options = Options_, + InnerStrideAtCompileTime = 1, + OuterStrideAtCompileTime = (int(Options) & int(RowMajor)) ? ColsAtCompileTime : RowsAtCompileTime, + + // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase + EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit, + Alignment = actual_alignment + }; +}; +} // namespace internal + +/** \class Matrix + * \ingroup Core_Module + * + * \brief The matrix class, also used for vectors and row-vectors + * + * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. + * Vectors are matrices with one column, and row-vectors are matrices with one row. + * + * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). + * + * The first three template parameters are required: + * \tparam Scalar_ Numeric type, e.g. float, double, int or std::complex. 
 + * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). + * \tparam Rows_ Number of rows, or \b Dynamic + * \tparam Cols_ Number of columns, or \b Dynamic + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either + * \b #AutoAlign or \b #DontAlign. + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter + * controls alignment, which is required for vectorization. It defaults to aligning matrices except for fixed sizes that + * aren't a multiple of the packet size. + * \tparam MaxRows_ Maximum number of rows. Defaults to \a Rows_ (\ref maxrows "note"). + * \tparam MaxCols_ Maximum number of columns. Defaults to \a Cols_ (\ref maxrows "note"). + * + * Eigen provides a number of typedefs covering the usual cases. Here are some examples: + * + * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>) + * \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>) + * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>) + * + * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>) + * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>) + * + * \li \c Matrix2Xf is a matrix with 2 fixed rows and a dynamic number of columns (\c Matrix<float, 2, Dynamic>) + * \li \c MatrixX3d is a matrix with a dynamic number of rows and 3 fixed columns of double (\c Matrix<double, Dynamic, 3>) + * + * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. + * + * You can access elements of vectors and matrices using normal subscripting: + * + * \code + * Eigen::VectorXd v(10); + * v[0] = 0.1; + * v[1] = 0.2; + * v(0) = 0.3; + * v(1) = 0.4; + * + * Eigen::MatrixXi m(10, 10); + * m(0, 1) = 1; + * m(0, 2) = 2; + * m(0, 3) = 3; + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN. + * + * Some notes: + * 
 + * <dl> + * + * <dt>\anchor dense Dense versus sparse:</dt> + * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the + * Sparse module. + * + * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored in an ordinary + * contiguous array. This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero + * coefficients.</dd> + * + * <dt>\anchor fixedsize Fixed-size versus dynamic-size:</dt> + * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates + * the array of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, + * typically up to 4x4, sometimes up to 16x16. Larger matrices should be declared as dynamic-size even if one happens to + * know their size at compile-time. + * + * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they + * are runtime variables, and the array of coefficients is allocated dynamically on the heap. + * + * Note that \em dense matrices, be they Fixed-size or Dynamic-size, do not expand dynamically in the sense of + * a std::map. If you want this behavior, see the Sparse module.</dd> + * + * <dt>\anchor maxrows MaxRows_ and MaxCols_:</dt> + * <dd>In most cases, one just leaves these parameters to the default values. + * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases + * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they + * cannot exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case + * MaxRows_ and MaxCols_ are the dimensions of the original matrix, while Rows_ and Cols_ are Dynamic.</dd> + * </dl> + * + * <i><b>ABI and storage layout</b></i> + * + * The table below summarizes the ABI of some possible Matrix instances, which is fixed throughout the lifetime of + * Eigen 3. + * <table class="manual"> + * <tr><th>Matrix type</th><th>Equivalent C structure</th></tr> + * <tr><td>\code Matrix<T,Dynamic,Dynamic> \endcode</td><td>\code + * struct { + * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0 + * Eigen::Index rows, cols; + * }; + * \endcode</td></tr> + * <tr class="alt"><td>\code + * Matrix<T,Dynamic,1> + * Matrix<T,1,Dynamic> \endcode</td><td>\code + * struct { + * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0 + * Eigen::Index size; + * }; + * \endcode</td></tr> + * <tr><td>\code Matrix<T,Rows,Cols> \endcode</td><td>\code + * struct { + * T data[Rows*Cols]; // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0 + * }; + * \endcode</td></tr> + * <tr class="alt"><td>\code Matrix<T,Dynamic,Dynamic,0,MaxRows,MaxCols> \endcode</td><td>\code + * struct { + * T data[MaxRows*MaxCols]; // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0 + * Eigen::Index rows, cols; + * }; + * \endcode</td></tr> + * </table> 
+ * Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. A(S) is defined to the largest + * possible power-of-two smaller to EIGEN_MAX_STATIC_ALIGN_BYTES. + * + * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, + * \ref TopicStorageOrders + */ + +template +class Matrix : public PlainObjectBase> { + public: + /** \brief Base class typedef. + * \sa PlainObjectBase + */ + typedef PlainObjectBase Base; + + enum { Options = Options_ }; + + EIGEN_DENSE_PUBLIC_INTERFACE(Matrix) + + typedef typename Base::PlainObject PlainObject; + + using Base::base; + using Base::coeffRef; + + /** + * \brief Assigns matrices to each other. + * + * \note This is a special case of the templated operator=. Its purpose is + * to prevent a default operator= from hiding the templated operator=. + * + * \callgraph + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix& operator=(const Matrix& other) { return Base::_set(other); } + + /** \internal + * \brief Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase& other) { + return Base::_set(other); + } + + /* Here, doxygen failed to copy the brief information when using \copydoc */ + + /** + * \brief Copies the generic expression \a other into *this. + * \copydetails DenseBase::operator=(const EigenBase &other) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase& other) { + return Base::operator=(other); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue& func) { + return Base::operator=(func); + } + + /** \brief Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(Index,Index) + */ +#if defined(EIGEN_INITIALIZE_COEFFS) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix() { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix() = default; +#endif + /** \brief Move constructor */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix(Matrix&&) = default; + /** \brief Moves the matrix into the other one. + * + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix& operator=(Matrix&& other) + EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable::value) { + Base::operator=(std::move(other)); + return *this; + } + + /** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&... args) + * + * Example: \include Matrix_variadic_ctor_cxx11.cpp + * Output: \verbinclude Matrix_variadic_ctor_cxx11.out + * + * \sa Matrix(const std::initializer_list>&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, + const ArgTypes&... args) + : Base(a0, a1, a2, a3, args...) 
{} + + /** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row. + * \cpp11 + * + * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients: + * + * Example: \include Matrix_initializer_list_23_cxx11.cpp + * Output: \verbinclude Matrix_initializer_list_23_cxx11.out + * + * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is + * triggered. + * + * In the case of a compile-time column vector, implicit transposition from a single row is allowed. + * Therefore VectorXd{{1,2,3,4,5}} is legal and the more verbose syntax + * RowVectorXd{{1},{2},{3},{4},{5}} can be avoided: + * + * Example: \include Matrix_initializer_list_vector_cxx11.cpp + * Output: \verbinclude Matrix_initializer_list_vector_cxx11.out + * + * In the case of fixed-sized matrices, the initializer list sizes must exactly match the matrix sizes, + * and implicit transposition is allowed for compile-time vectors only. + * + * \sa Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) + */ + EIGEN_DEVICE_FUNC explicit constexpr EIGEN_STRONG_INLINE Matrix( + const std::initializer_list>& list) + : Base(list) {} + +#ifndef EIGEN_PARSED_BY_DOXYGEN + + // This constructor is for both 1x1 matrices and dynamic vectors + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Matrix(const T& x) { + Base::template _init1(x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) { + Base::template _init2(x, y); + } + +#else + /** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */ + EIGEN_DEVICE_FUNC explicit Matrix(const Scalar* data); + + /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * This is useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. + * + * \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance, + * calling Matrix(1) will call the initialization constructor: Matrix(const Scalar&). + * For fixed-size \c 1x1 matrices it is therefore recommended to use the default + * constructor Matrix() instead, especially when using one of the non standard + * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). + */ + EIGEN_STRONG_INLINE explicit Matrix(Index dim); + /** \brief Constructs an initialized 1x1 matrix with the given coefficient + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ + Matrix(const Scalar& x); + /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns. + * + * This is useful for dynamic-size matrices. For fixed-size matrices, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. + * + * \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance, + * calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y). + * For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default + * constructor Matrix() instead, especially when using one of the non standard + * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). 
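 + * + * A short illustrative sketch of the distinction: + * \code + * Eigen::MatrixXf m(2, 3); // dynamic-size: uninitialized 2x3 matrix + * Eigen::Vector2f v(2, 1); // fixed-size 2x1: coefficients set to (2, 1)! + * Eigen::Vector2f w; w << 2, 1; // unambiguous alternative + * \endcode 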
+ */ + EIGEN_DEVICE_FUNC Matrix(Index rows, Index cols); + + /** \brief Constructs an initialized 2D vector with given coefficients + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ + Matrix(const Scalar& x, const Scalar& y); +#endif // end EIGEN_PARSED_BY_DOXYGEN + + /** \brief Constructs an initialized 3D vector with given coefficients + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + } + /** \brief Constructs an initialized 4D vector with given coefficients + * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + m_storage.data()[3] = w; + } + + /** \brief Copy constructor */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix(const Matrix&) = default; + + /** \brief Copy constructor for generic expressions. + * \sa MatrixBase::operator=(const EigenBase&) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const EigenBase& other) : Base(other.derived()) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return 1; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); } + + /////////// Geometry module /////////// + + template + EIGEN_DEVICE_FUNC explicit Matrix(const RotationBase& r); + template + EIGEN_DEVICE_FUNC Matrix& operator=(const RotationBase& r); + +// allow to extend Matrix outside Eigen +#ifdef EIGEN_MATRIX_PLUGIN +#include EIGEN_MATRIX_PLUGIN +#endif + + protected: + template + friend struct internal::conservative_resize_like_impl; + + using Base::m_storage; +}; + +/** \defgroup matrixtypedefs Global matrix typedefs + * + * \ingroup Core_Module + * + * %Eigen defines several typedef shortcuts for most common matrix and vector types. + * + * The general patterns are the following: + * + * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd + * for complex double. + * + * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of + * floats. + * + * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is + * a fixed-size vector of 4 complex floats. + * + * With \cpp11, template alias are also defined for common sizes. + * They follow the same pattern as above except that the scalar type suffix is replaced by a + * template parameter, i.e.: + * - `MatrixSize` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size. + * - `MatrixXSize` and `MatrixSizeX` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices. + * - `VectorSize` and `RowVectorSize` for column and row vectors. + * + * With \cpp11, you can also use fully generic column and row vector types: `Vector` and + * `RowVector`. 
+ * + * \sa class Matrix + */ + +#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ + /** \ingroup matrixtypedefs */ \ + /** \brief `Size`×`Size` matrix of type `Type`. */ \ + typedef Matrix Matrix##SizeSuffix##TypeSuffix; \ + /** \ingroup matrixtypedefs */ \ + /** \brief `Size`×`1` vector of type `Type`. */ \ + typedef Matrix Vector##SizeSuffix##TypeSuffix; \ + /** \ingroup matrixtypedefs */ \ + /** \brief `1`×`Size` vector of type `Type`. */ \ + typedef Matrix RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ + /** \ingroup matrixtypedefs */ \ + /** \brief `Size`×`Dynamic` matrix of type `Type`. */ \ + typedef Matrix Matrix##Size##X##TypeSuffix; \ + /** \ingroup matrixtypedefs */ \ + /** \brief `Dynamic`×`Size` matrix of type `Type`. */ \ + typedef Matrix Matrix##X##Size##TypeSuffix; + +#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ + EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ + EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ + EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ + EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ + EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ + EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ + EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4) + +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_TYPEDEFS +#undef EIGEN_MAKE_FIXED_TYPEDEFS + +#define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \ + /** \ingroup matrixtypedefs */ \ + /** \brief \cpp11 `Size`×`Size` matrix of type `Type`.*/ \ + template \ + using Matrix##SizeSuffix = Matrix; \ + /** \ingroup matrixtypedefs */ \ + /** \brief \cpp11 `Size`×`1` vector of type `Type`.*/ \ + template \ + using Vector##SizeSuffix = Matrix; \ + /** \ingroup matrixtypedefs */ \ + /** \brief \cpp11 `1`×`Size` vector of type `Type`.*/ \ + template \ + using RowVector##SizeSuffix = Matrix; + +#define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \ + /** \ingroup matrixtypedefs */ \ + /** \brief \cpp11 `Size`×`Dynamic` matrix of type `Type` */ \ + template \ + using Matrix##Size##X = Matrix; \ + /** \ingroup matrixtypedefs */ \ + /** \brief \cpp11 `Dynamic`×`Size` matrix of type `Type`. */ \ + template \ + using Matrix##X##Size = Matrix; + +EIGEN_MAKE_TYPEDEFS(2, 2) +EIGEN_MAKE_TYPEDEFS(3, 3) +EIGEN_MAKE_TYPEDEFS(4, 4) +EIGEN_MAKE_TYPEDEFS(Dynamic, X) +EIGEN_MAKE_FIXED_TYPEDEFS(2) +EIGEN_MAKE_FIXED_TYPEDEFS(3) +EIGEN_MAKE_FIXED_TYPEDEFS(4) + +/** \ingroup matrixtypedefs + * \brief \cpp11 `Size`×`1` vector of type `Type`. */ +template +using Vector = Matrix; + +/** \ingroup matrixtypedefs + * \brief \cpp11 `1`×`Size` vector of type `Type`. */ +template +using RowVector = Matrix; + +#undef EIGEN_MAKE_TYPEDEFS +#undef EIGEN_MAKE_FIXED_TYPEDEFS + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MatrixBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MatrixBase.h new file mode 100644 index 0000000000000000000000000000000000000000..5466a57c420ee02b9df4f5e7a510dc9a6b694aef --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/MatrixBase.h @@ -0,0 +1,542 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIXBASE_H +#define EIGEN_MATRIXBASE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class MatrixBase + * \ingroup Core_Module + * + * \brief Base class for all dense matrices, vectors, and expressions + * + * This class is the base that is inherited by all matrix, vector, and related expression + * types. Most of the Eigen API is contained in this class, and its base classes. Other important + * classes for the Eigen API are Matrix, and VectorwiseOp. + * + * Note that some methods are defined in other modules such as the \ref LU_Module LU module + * for all functions related to matrix inversions. + * + * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc. + * + * When writing a function taking Eigen objects as argument, if you want your function + * to take as argument any matrix, vector, or expression, just let it take a + * MatrixBase argument. As an example, here is a function printFirstRow which, given + * a matrix, vector, or expression \a x, prints the first row of \a x. + * + * \code + template + void printFirstRow(const Eigen::MatrixBase& x) + { + cout << x.row(0) << endl; + } + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN. + * + * \sa \blank \ref TopicClassHierarchy + */ +template +class MatrixBase : public DenseBase { + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef MatrixBase StorageBaseType; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::StorageIndex StorageIndex; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseBase Base; + using Base::ColsAtCompileTime; + using Base::Flags; + using Base::IsVectorAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::RowsAtCompileTime; + using Base::SizeAtCompileTime; + + using Base::coeff; + using Base::coeffRef; + using Base::cols; + using Base::const_cast_derived; + using Base::derived; + using Base::eval; + using Base::lazyAssign; + using Base::rows; + using Base::size; + using Base::operator-; + using Base::operator+=; + using Base::operator-=; + using Base::operator*=; + using Base::operator/=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType; + typedef typename Base::RowXpr RowXpr; + typedef typename Base::ColXpr ColXpr; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** type of the equivalent square matrix */ + typedef Matrix + SquareMatrixType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \returns the size of the main diagonal, which is min(rows(),cols()). + * \sa rows(), cols(), SizeAtCompileTime. 
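 + * + * For instance (illustrative): + * \code + * Eigen::MatrixXf m(3, 5); + * Eigen::Index d = m.diagonalSize(); // 3 + * \endcode 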
*/ + EIGEN_DEVICE_FUNC inline Index diagonalSize() const { return (numext::mini)(rows(), cols()); } + + typedef typename Base::PlainObject PlainObject; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp, PlainObject> ConstantReturnType; + /** \internal the return type of MatrixBase::adjoint() */ + typedef std::conditional_t::IsComplex, + CwiseUnaryOp, ConstTransposeReturnType>, + ConstTransposeReturnType> + AdjointReturnType; + /** \internal Return type of eigenvalues() */ + typedef Matrix, internal::traits::ColsAtCompileTime, 1, ColMajor> + EigenvaluesReturnType; + /** \internal the return type of identity */ + typedef CwiseNullaryOp, PlainObject> IdentityReturnType; + /** \internal the return type of unit vectors */ + typedef Block, SquareMatrixType>, + internal::traits::RowsAtCompileTime, internal::traits::ColsAtCompileTime> + BasisReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase +#define EIGEN_DOC_UNARY_ADDONS(X, Y) +#include "../plugins/CommonCwiseBinaryOps.inc" +#include "../plugins/MatrixCwiseUnaryOps.inc" +#include "../plugins/MatrixCwiseBinaryOps.inc" +#ifdef EIGEN_MATRIXBASE_PLUGIN +#include EIGEN_MATRIXBASE_PLUGIN +#endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_DOC_UNARY_ADDONS + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const MatrixBase& other); + + // We cannot inherit here via Base::operator= since it is causing + // trouble with MSVC. + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); + + template + EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue& other); + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const MatrixBase& other); + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const MatrixBase& other); + + template + EIGEN_DEVICE_FUNC const Product operator*(const MatrixBase& other) const; + + template + EIGEN_DEVICE_FUNC const Product lazyProduct( + const MatrixBase& other) const; + + template + Derived& operator*=(const EigenBase& other); + + template + void applyOnTheLeft(const EigenBase& other); + + template + void applyOnTheRight(const EigenBase& other); + + template + EIGEN_DEVICE_FUNC const Product operator*( + const DiagonalBase& diagonal) const; + + template + EIGEN_DEVICE_FUNC const Product operator*( + const SkewSymmetricBase& skew) const; + + template + EIGEN_DEVICE_FUNC typename ScalarBinaryOpTraits::Scalar, + typename internal::traits::Scalar>::ReturnType + dot(const MatrixBase& other) const; + + EIGEN_DEVICE_FUNC RealScalar squaredNorm() const; + EIGEN_DEVICE_FUNC RealScalar norm() const; + RealScalar stableNorm() const; + RealScalar blueNorm() const; + RealScalar hypotNorm() const; + EIGEN_DEVICE_FUNC const PlainObject normalized() const; + EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const; + EIGEN_DEVICE_FUNC void normalize(); + EIGEN_DEVICE_FUNC void stableNormalize(); + + EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const; + EIGEN_DEVICE_FUNC void adjointInPlace(); + + typedef Diagonal DiagonalReturnType; + EIGEN_DEVICE_FUNC DiagonalReturnType diagonal(); + + typedef Diagonal ConstDiagonalReturnType; + EIGEN_DEVICE_FUNC const 
ConstDiagonalReturnType diagonal() const; + + template + EIGEN_DEVICE_FUNC Diagonal diagonal(); + + template + EIGEN_DEVICE_FUNC const Diagonal diagonal() const; + + EIGEN_DEVICE_FUNC Diagonal diagonal(Index index); + EIGEN_DEVICE_FUNC const Diagonal diagonal(Index index) const; + + template + struct TriangularViewReturnType { + typedef TriangularView Type; + }; + template + struct ConstTriangularViewReturnType { + typedef const TriangularView Type; + }; + + template + EIGEN_DEVICE_FUNC typename TriangularViewReturnType::Type triangularView(); + template + EIGEN_DEVICE_FUNC typename ConstTriangularViewReturnType::Type triangularView() const; + + template + struct SelfAdjointViewReturnType { + typedef SelfAdjointView Type; + }; + template + struct ConstSelfAdjointViewReturnType { + typedef const SelfAdjointView Type; + }; + + template + EIGEN_DEVICE_FUNC typename SelfAdjointViewReturnType::Type selfadjointView(); + template + EIGEN_DEVICE_FUNC typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; + + const SparseView sparseView( + const Scalar& m_reference = Scalar(0), + const typename NumTraits::Real& m_epsilon = NumTraits::dummy_precision()) const; + EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(); + EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols); + EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i); + EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitX(); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitY(); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ(); + EIGEN_DEVICE_FUNC static const BasisReturnType UnitW(); + + EIGEN_DEVICE_FUNC const DiagonalWrapper asDiagonal() const; + const PermutationWrapper asPermutation() const; + EIGEN_DEVICE_FUNC const SkewSymmetricWrapper asSkewSymmetric() const; + + EIGEN_DEVICE_FUNC Derived& setIdentity(); + EIGEN_DEVICE_FUNC Derived& setIdentity(Index rows, Index cols); + EIGEN_DEVICE_FUNC Derived& setUnit(Index i); + EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i); + + bool isIdentity(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isDiagonal(const RealScalar& prec = NumTraits::dummy_precision()) const; + + bool isUpperTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isLowerTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; + + bool isSkewSymmetric(const RealScalar& prec = NumTraits::dummy_precision()) const; + + template + bool isOrthogonal(const MatrixBase& other, + const RealScalar& prec = NumTraits::dummy_precision()) const; + bool isUnitary(const RealScalar& prec = NumTraits::dummy_precision()) const; + + /** \returns true if each coefficients of \c *this and \a other are all exactly equal. + * \warning When using floating point scalar values you probably should rather use a + * fuzzy comparison such as isApprox() + * \sa isApprox(), operator!= */ + template + EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase& other) const { + return (this->rows() == other.rows()) && (this->cols() == other.cols()) && cwiseEqual(other).all(); + } + + /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other. 
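 + * + * A sketch of why exact comparison is fragile for floating-point scalars + * (illustrative): + * \code + * Eigen::Vector2d a(0.1, 0.2), b = a; + * b(0) += 1e-16; // representable perturbation + * bool exact = (a == b); // false: coefficient-wise exact test + * bool fuzzy = a.isApprox(b); // true: relative tolerance + * \endcode 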
+ * \warning When using floating point scalar values you probably should rather use a + * fuzzy comparison such as isApprox() + * \sa isApprox(), operator== */ + template + EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase& other) const { + return !(*this == other); + } + + NoAlias EIGEN_DEVICE_FUNC noalias(); + + // TODO forceAlignedAccess is temporarily disabled + // Need to find a nicer workaround. + inline const Derived& forceAlignedAccess() const { return derived(); } + inline Derived& forceAlignedAccess() { return derived(); } + template + inline const Derived& forceAlignedAccessIf() const { + return derived(); + } + template + inline Derived& forceAlignedAccessIf() { + return derived(); + } + + EIGEN_DEVICE_FUNC Scalar trace() const; + + template + EIGEN_DEVICE_FUNC RealScalar lpNorm() const; + + EIGEN_DEVICE_FUNC MatrixBase& matrix() { return *this; } + EIGEN_DEVICE_FUNC const MatrixBase& matrix() const { return *this; } + + /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix + * \sa ArrayBase::matrix() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper array() { return ArrayWrapper(derived()); } + /** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix + * \sa ArrayBase::matrix() */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper array() const { + return ArrayWrapper(derived()); + } + + /////////// LU module /////////// + + template + inline const FullPivLU fullPivLu() const; + template + inline const PartialPivLU partialPivLu() const; + + template + inline const PartialPivLU lu() const; + + EIGEN_DEVICE_FUNC inline const Inverse inverse() const; + + template + inline void computeInverseAndDetWithCheck( + ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible, + const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision()) const; + + template + inline void computeInverseWithCheck( + ResultType& inverse, bool& invertible, + const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision()) const; + + EIGEN_DEVICE_FUNC Scalar determinant() const; + + /////////// Cholesky module /////////// + + inline const LLT llt() const; + inline const LDLT ldlt() const; + + /////////// QR module /////////// + + inline const HouseholderQR householderQr() const; + template + inline const ColPivHouseholderQR colPivHouseholderQr() const; + template + inline const FullPivHouseholderQR fullPivHouseholderQr() const; + template + inline const CompleteOrthogonalDecomposition completeOrthogonalDecomposition() const; + + /////////// Eigenvalues module /////////// + + inline EigenvaluesReturnType eigenvalues() const; + inline RealScalar operatorNorm() const; + + /////////// SVD module /////////// + + template + inline JacobiSVD jacobiSvd() const; + template + EIGEN_DEPRECATED inline JacobiSVD jacobiSvd(unsigned int computationOptions) const; + + template + inline BDCSVD bdcSvd() const; + template + EIGEN_DEPRECATED inline BDCSVD bdcSvd(unsigned int computationOptions) const; + + /////////// Geometry module /////////// + + template + EIGEN_DEVICE_FUNC inline typename internal::cross_impl::return_type cross( + const MatrixBase& other) const; + + template + EIGEN_DEVICE_FUNC inline PlainObject cross3(const MatrixBase& other) const; + + EIGEN_DEVICE_FUNC inline PlainObject unitOrthogonal(void) const; + + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline Matrix eulerAngles(Index a0, Index a1, Index a2) const; + + EIGEN_DEVICE_FUNC inline Matrix canonicalEulerAngles(Index a0, Index a1, Index a2) 
const; + + // put this as separate enum value to work around possible GCC 4.3 bug (?) + enum { + HomogeneousReturnTypeDirection = + ColsAtCompileTime == 1 && RowsAtCompileTime == 1 + ? ((internal::traits::Flags & RowMajorBit) == RowMajorBit ? Horizontal : Vertical) + : ColsAtCompileTime == 1 ? Vertical + : Horizontal + }; + typedef Homogeneous HomogeneousReturnType; + EIGEN_DEVICE_FUNC inline HomogeneousReturnType homogeneous() const; + + enum { SizeMinusOne = SizeAtCompileTime == Dynamic ? Dynamic : SizeAtCompileTime - 1 }; + typedef Block::ColsAtCompileTime == 1 ? SizeMinusOne : 1, + internal::traits::ColsAtCompileTime == 1 ? 1 : SizeMinusOne> + ConstStartMinusOne; + typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne, Scalar, quotient) HNormalizedReturnType; + EIGEN_DEVICE_FUNC inline const HNormalizedReturnType hnormalized() const; + + ////////// Householder module /////////// + + EIGEN_DEVICE_FUNC void makeHouseholderInPlace(Scalar& tau, RealScalar& beta); + template + EIGEN_DEVICE_FUNC void makeHouseholder(EssentialPart& essential, Scalar& tau, RealScalar& beta) const; + template + EIGEN_DEVICE_FUNC void applyHouseholderOnTheLeft(const EssentialPart& essential, const Scalar& tau, + Scalar* workspace); + template + EIGEN_DEVICE_FUNC void applyHouseholderOnTheRight(const EssentialPart& essential, const Scalar& tau, + Scalar* workspace); + + ///////// Jacobi module ///////// + + template + EIGEN_DEVICE_FUNC void applyOnTheLeft(Index p, Index q, const JacobiRotation& j); + template + EIGEN_DEVICE_FUNC void applyOnTheRight(Index p, Index q, const JacobiRotation& j); + + ///////// SparseCore module ///////// + + template + EIGEN_STRONG_INLINE const typename SparseMatrixBase::template CwiseProductDenseReturnType::Type + cwiseProduct(const SparseMatrixBase& other) const { + return other.cwiseProduct(derived()); + } + + ///////// MatrixFunctions module ///////// + + typedef typename internal::stem_function::type StemFunction; +#define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \ + /** \returns an expression of the matrix Description of \c *this. \brief This function requires the unsupported MatrixFunctions module. To compute the \ + * coefficient-wise Description use ArrayBase::##Name . */ \ + const ReturnType Name() const; +#define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \ + /** \returns an expression of the matrix Description of \c *this. \brief This function requires the unsupported MatrixFunctions module. To compute the \ + * coefficient-wise Description use ArrayBase::##Name . 
*/ \ + const ReturnType Name(Argument) const; + + EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential) + /** \brief Helper function for the unsupported + * MatrixFunctions module.*/ + const MatrixFunctionReturnValue matrixFunction(StemFunction f) const; + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine) + EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine) + EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root) + EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm) + EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p) + EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const std::complex& p) + + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase) + + private: + EIGEN_DEVICE_FUNC explicit MatrixBase(int); + EIGEN_DEVICE_FUNC MatrixBase(int, int); + template + EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase&); + + protected: + // mixing arrays and matrices is not legal + template + Derived& operator+=(const ArrayBase&) { + EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1, + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); + return *this; + } + // mixing arrays and matrices is not legal + template + Derived& operator-=(const ArrayBase&) { + EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1, + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); + return *this; + } +}; + +/*************************************************************************** + * Implementation of matrix base methods + ***************************************************************************/ + +/** replaces \c *this by \c *this * \a other. + * + * \returns a reference to \c *this + * + * Example: \include MatrixBase_applyOnTheRight.cpp + * Output: \verbinclude MatrixBase_applyOnTheRight.out + */ +template +template +inline Derived& MatrixBase::operator*=(const EigenBase& other) { + other.derived().applyThisOnTheRight(derived()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=(). + * + * Example: \include MatrixBase_applyOnTheRight.cpp + * Output: \verbinclude MatrixBase_applyOnTheRight.out + */ +template +template +inline void MatrixBase::applyOnTheRight(const EigenBase& other) { + other.derived().applyThisOnTheRight(derived()); +} + +/** replaces \c *this by \a other * \c *this. 
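 + * + * A minimal sketch: + * \code + * Eigen::Matrix2f a, b; + * a << 1, 2, 3, 4; + * b << 0, 1, 1, 0; + * a.applyOnTheLeft(b); // equivalent to a = b * a + * \endcode 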
+ * + * Example: \include MatrixBase_applyOnTheLeft.cpp + * Output: \verbinclude MatrixBase_applyOnTheLeft.out + */ +template +template +inline void MatrixBase::applyOnTheLeft(const EigenBase& other) { + other.derived().applyThisOnTheLeft(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIXBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NestByValue.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NestByValue.h new file mode 100644 index 0000000000000000000000000000000000000000..ec360ebdea969ae427de3598dde06b06c3c8efaf --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NestByValue.h @@ -0,0 +1,91 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NESTBYVALUE_H +#define EIGEN_NESTBYVALUE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits { + enum { Flags = traits::Flags & ~NestByRefBit }; +}; +} // namespace internal + +/** \class NestByValue + * \ingroup Core_Module + * + * \brief Expression which must be nested by value + * + * \tparam ExpressionType the type of the object of which we are requiring nesting-by-value + * + * This class is the return type of MatrixBase::nestByValue() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::nestByValue() + */ +template +class NestByValue : public internal::dense_xpr_base >::type { + public: + typedef typename internal::dense_xpr_base::type Base; + static constexpr bool HasDirectAccess = internal::has_direct_access::ret; + + EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue) + + EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } + + EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } + + EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; } + + EIGEN_DEVICE_FUNC typename std::enable_if::type data() const { + return m_expression.data(); + } + + EIGEN_DEVICE_FUNC typename std::enable_if::type innerStride() const { + return m_expression.innerStride(); + } + + EIGEN_DEVICE_FUNC typename std::enable_if::type outerStride() const { + return m_expression.outerStride(); + } + + protected: + const ExpressionType m_expression; +}; + +/** \returns an expression of the temporary version of *this. 
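 + * + * The wrapped expression is copied into enclosing expressions instead of being + * kept by reference; a minimal sketch: + * \code + * Eigen::MatrixXd a(2, 2), b(2, 2); + * a.setRandom(); + * b.setRandom(); + * Eigen::MatrixXd c = (a + b).nestByValue(); // the sum is nested by value + * \endcode 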
+ */
+template <typename Derived>
+EIGEN_DEVICE_FUNC inline const NestByValue<Derived> DenseBase<Derived>::nestByValue() const {
+  return NestByValue<Derived>(derived());
+}
+
+namespace internal {
+
+// Evaluator of NestByValue -> eval into a temporary
+template <typename ArgType>
+struct evaluator<NestByValue<ArgType> > : public evaluator<ArgType> {
+  typedef evaluator<ArgType> Base;
+
+  EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue<ArgType>& xpr) : Base(xpr.nestedExpression()) {}
+};
+}  // namespace internal
+
+}  // end namespace Eigen
+
+#endif // EIGEN_NESTBYVALUE_H
diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NoAlias.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NoAlias.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6c7209104ea8455c18e053990406242a3860753
--- /dev/null
+++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NoAlias.h
@@ -0,0 +1,102 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NOALIAS_H
+#define EIGEN_NOALIAS_H
+
+// IWYU pragma: private
+#include "./InternalHeaderCheck.h"
+
+namespace Eigen {
+
+/** \class NoAlias
+ * \ingroup Core_Module
+ *
+ * \brief Pseudo expression providing an operator = assuming no aliasing
+ *
+ * \tparam ExpressionType the type of the object on which to do the lazy assignment
+ *
+ * This class represents an expression with special assignment operators
+ * assuming no aliasing between the target expression and the source expression.
+ * More precisely, it allows bypassing the EvalBeforeAssignBit flag of the source expression.
+ * It is the return type of MatrixBase::noalias()
+ * and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::noalias()
+ */
+template <typename ExpressionType, template <typename> class StorageBase>
+class NoAlias {
+ public:
+  typedef typename ExpressionType::Scalar Scalar;
+
+  EIGEN_DEVICE_FUNC explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}
+
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other) {
+    call_assignment_no_alias(m_expression, other.derived(),
+                             internal::assign_op<Scalar, typename OtherDerived::Scalar>());
+    return m_expression;
+  }
+
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other) {
+    call_assignment_no_alias(m_expression, other.derived(),
+                             internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
+    return m_expression;
+  }
+
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other) {
+    call_assignment_no_alias(m_expression, other.derived(),
+                             internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
+    return m_expression;
+  }
+
+  EIGEN_DEVICE_FUNC ExpressionType& expression() const { return m_expression; }
+
+ protected:
+  ExpressionType& m_expression;
+};
+
+/** \returns a pseudo expression of \c *this with an operator= assuming
+ * no aliasing between \c *this and the source expression.
+ *
+ * More precisely, noalias() allows bypassing the EvalBeforeAssignBit flag.
+ * Currently, even though several expressions may alias, only product
+ * expressions have this flag. Therefore, noalias() is only useful when
+ * the source expression contains a matrix product.
+ * + * Here are some examples where noalias is useful: + * \code + * D.noalias() = A * B; + * D.noalias() += A.transpose() * B; + * D.noalias() -= 2 * A * B.adjoint(); + * \endcode + * + * On the other hand the following example will lead to a \b wrong result: + * \code + * A.noalias() = A * B; + * \endcode + * because the result matrix A is also an operand of the matrix product. Therefore, + * there is no alternative than evaluating A * B in a temporary, that is the default + * behavior when you write: + * \code + * A = A * B; + * \endcode + * + * \sa class NoAlias + */ +template +NoAlias EIGEN_DEVICE_FUNC MatrixBase::noalias() { + return NoAlias(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_NOALIAS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NumTraits.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NumTraits.h new file mode 100644 index 0000000000000000000000000000000000000000..67a6c08ae4473d0d608a6f97826ccdfada8b01fc --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/NumTraits.h @@ -0,0 +1,328 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NUMTRAITS_H +#define EIGEN_NUMTRAITS_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +// default implementation of digits(), based on numeric_limits if specialized, +// 0 for integer types, and log2(epsilon()) otherwise. +template ::is_specialized, + bool is_integer = NumTraits::IsInteger> +struct default_digits_impl { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return std::numeric_limits::digits; } +}; + +template +struct default_digits_impl // Floating point +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { + using std::ceil; + using std::log2; + typedef typename NumTraits::Real Real; + return int(ceil(-log2(NumTraits::epsilon()))); + } +}; + +template +struct default_digits_impl // Integer +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return 0; } +}; + +// default implementation of digits10(), based on numeric_limits if specialized, +// 0 for integer types, and floor((digits()-1)*log10(2)) otherwise. +template ::is_specialized, + bool is_integer = NumTraits::IsInteger> +struct default_digits10_impl { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return std::numeric_limits::digits10; } +}; + +template +struct default_digits10_impl // Floating point +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { + using std::floor; + using std::log10; + typedef typename NumTraits::Real Real; + return int(floor((internal::default_digits_impl::run() - 1) * log10(2))); + } +}; + +template +struct default_digits10_impl // Integer +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return 0; } +}; + +// default implementation of max_digits10(), based on numeric_limits if specialized, +// 0 for integer types, and log10(2) * digits() + 1 otherwise. 
+template ::is_specialized, + bool is_integer = NumTraits::IsInteger> +struct default_max_digits10_impl { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return std::numeric_limits::max_digits10; } +}; + +template +struct default_max_digits10_impl // Floating point +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { + using std::ceil; + using std::log10; + typedef typename NumTraits::Real Real; + return int(ceil(internal::default_digits_impl::run() * log10(2) + 1)); + } +}; + +template +struct default_max_digits10_impl // Integer +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return 0; } +}; + +} // end namespace internal + +namespace numext { +/** \internal bit-wise cast without changing the underlying bit representation. */ + +// TODO: Replace by std::bit_cast (available in C++20) +template +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) { + // The behaviour of memcpy is not specified for non-trivially copyable types + EIGEN_STATIC_ASSERT(std::is_trivially_copyable::value, THIS_TYPE_IS_NOT_SUPPORTED) + EIGEN_STATIC_ASSERT(std::is_trivially_copyable::value && std::is_default_constructible::value, + THIS_TYPE_IS_NOT_SUPPORTED) + EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED) + + Tgt tgt; + // Load src into registers first. This allows the memcpy to be elided by CUDA. + const Src staged = src; + EIGEN_USING_STD(memcpy) + memcpy(static_cast(&tgt), static_cast(&staged), sizeof(Tgt)); + return tgt; +} +} // namespace numext + +/** \class NumTraits + * \ingroup Core_Module + * + * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen. + * + * \tparam T the numeric type at hand + * + * This class stores enums, typedefs and static methods giving information about a numeric type. + * + * The provided data consists of: + * \li A typedef \c Real, giving the "real part" type of \a T. If \a T is already real, + * then \c Real is just a typedef to \a T. If \a T is \c std::complex then \c Real + * is a typedef to \a U. + * \li A typedef \c NonInteger, giving the type that should be used for operations producing non-integral values, + * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives + * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to + * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is + * only intended as a helper for code that needs to explicitly promote types. + * \li A typedef \c Literal giving the type to use for numeric literals such as "2" or "0.5". For instance, for \c + * std::complex, Literal is defined as \c U. Of course, this type must be fully compatible with \a T. In doubt, just + * use \a T here. \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you + * don't know what this means, just use \a T here. \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c + * std::complex type, and to 0 otherwise. \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type + * such as \c int, and to \c 0 otherwise. \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of + * the number of CPU cycles needed to by move / add / mul instructions respectively, assuming the data is already stored + * in CPU registers. Stay vague here. No need to do architecture-specific stuff. If you don't know what this means, just + * use \c Eigen::HugeCost. 
\li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned.
+ * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must
+ *     be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise.
+ * \li An epsilon() function which, unlike std::numeric_limits<T>::epsilon(), returns a \a Real instead of a \a T.
+ * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default
+ *     value by the fuzzy comparison operators.
+ * \li highest() and lowest() functions returning the highest and lowest possible values respectively.
+ * \li digits() function returning the number of radix digits (non-sign digits for integers, mantissa for
+ *     floating-point). This is the analogue of std::numeric_limits<T>::digits which is used as the default
+ *     implementation if specialized.
+ * \li digits10() function returning the number of decimal digits that can be represented without change. This is
+ *     the analogue of std::numeric_limits<T>::digits10 which is used as the default implementation if specialized.
+ * \li max_digits10() function returning the number of decimal digits required to uniquely represent all distinct
+ *     values of the type. This is the analogue of std::numeric_limits<T>::max_digits10 which is used as the
+ *     default implementation if specialized.
+ * \li min_exponent() and max_exponent() functions returning the lowest and highest possible values, respectively,
+ *     such that the radix raised to the power exponent-1 is a normalized floating-point number. These are
+ *     equivalent to std::numeric_limits<T>::min_exponent / std::numeric_limits<T>::max_exponent.
+ * \li infinity() function returning a representation of positive infinity, if available.
+ * \li quiet_NaN() function returning a non-signaling "not-a-number", if available.
+ */
+
+template <typename T>
+struct GenericNumTraits {
+  enum {
+    IsInteger = std::numeric_limits<T>::is_integer,
+    IsSigned = std::numeric_limits<T>::is_signed,
+    IsComplex = 0,
+    RequireInitialization = internal::is_arithmetic<T>::value ?
0 : 1,
+    ReadCost = 1,
+    AddCost = 1,
+    MulCost = 1
+  };
+
+  typedef T Real;
+  typedef std::conditional_t<IsInteger, double, T> NonInteger;
+  typedef T Nested;
+  typedef T Literal;
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real epsilon() { return numext::numeric_limits<T>::epsilon(); }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int digits10() { return internal::default_digits10_impl<T>::run(); }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int max_digits10() {
+    return internal::default_max_digits10_impl<T>::run();
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int digits() { return internal::default_digits_impl<T>::run(); }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int min_exponent() { return numext::numeric_limits<T>::min_exponent; }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int max_exponent() { return numext::numeric_limits<T>::max_exponent; }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real dummy_precision() {
+    // make sure to override this for floating-point types
+    return Real(0);
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T highest() { return (numext::numeric_limits<T>::max)(); }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T lowest() { return (numext::numeric_limits<T>::lowest)(); }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T infinity() { return numext::numeric_limits<T>::infinity(); }
+
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T quiet_NaN() { return numext::numeric_limits<T>::quiet_NaN(); }
+};
+
+template <typename T>
+struct NumTraits : GenericNumTraits<T> {};
+
+template <>
+struct NumTraits<float> : GenericNumTraits<float> {
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline float dummy_precision() { return 1e-5f; }
+};
+
+template <>
+struct NumTraits<double> : GenericNumTraits<double> {
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline double dummy_precision() { return 1e-12; }
+};
+
+// GPU devices treat `long double` as `double`.
+#ifndef EIGEN_GPU_COMPILE_PHASE +template <> +struct NumTraits : GenericNumTraits { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline long double dummy_precision() { + return static_cast(1e-15l); + } + +#if defined(EIGEN_ARCH_PPC) && (__LDBL_MANT_DIG__ == 106) + // PowerPC double double causes issues with some values + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline long double epsilon() { + // 2^(-(__LDBL_MANT_DIG__)+1) + return static_cast(2.4651903288156618919116517665087e-32l); + } +#endif +}; +#endif + +template +struct NumTraits > : GenericNumTraits > { + typedef Real_ Real; + typedef typename NumTraits::Literal Literal; + enum { + IsComplex = 1, + IsSigned = NumTraits::IsSigned, + RequireInitialization = NumTraits::RequireInitialization, + ReadCost = 2 * NumTraits::ReadCost, + AddCost = 2 * NumTraits::AddCost, + MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost + }; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real epsilon() { return NumTraits::epsilon(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real dummy_precision() { return NumTraits::dummy_precision(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int digits10() { return NumTraits::digits10(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int max_digits10() { return NumTraits::max_digits10(); } +}; + +template +struct NumTraits > { + typedef Array ArrayType; + typedef typename NumTraits::Real RealScalar; + typedef Array Real; + typedef typename NumTraits::NonInteger NonIntegerScalar; + typedef Array NonInteger; + typedef ArrayType& Nested; + typedef typename NumTraits::Literal Literal; + + enum { + IsComplex = NumTraits::IsComplex, + IsInteger = NumTraits::IsInteger, + IsSigned = NumTraits::IsSigned, + RequireInitialization = 1, + ReadCost = ArrayType::SizeAtCompileTime == Dynamic + ? HugeCost + : ArrayType::SizeAtCompileTime * int(NumTraits::ReadCost), + AddCost = ArrayType::SizeAtCompileTime == Dynamic ? HugeCost + : ArrayType::SizeAtCompileTime * int(NumTraits::AddCost), + MulCost = ArrayType::SizeAtCompileTime == Dynamic ? HugeCost + : ArrayType::SizeAtCompileTime * int(NumTraits::MulCost) + }; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline RealScalar epsilon() { return NumTraits::epsilon(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline RealScalar dummy_precision() { + return NumTraits::dummy_precision(); + } + + EIGEN_CONSTEXPR + static inline int digits10() { return NumTraits::digits10(); } + EIGEN_CONSTEXPR + static inline int max_digits10() { return NumTraits::max_digits10(); } +}; + +template <> +struct NumTraits : GenericNumTraits { + enum { RequireInitialization = 1, ReadCost = HugeCost, AddCost = HugeCost, MulCost = HugeCost }; + + EIGEN_CONSTEXPR + static inline int digits10() { return 0; } + EIGEN_CONSTEXPR + static inline int max_digits10() { return 0; } + + private: + static inline std::string epsilon(); + static inline std::string dummy_precision(); + static inline std::string lowest(); + static inline std::string highest(); + static inline std::string infinity(); + static inline std::string quiet_NaN(); +}; + +// Empty specialization for void to allow template specialization based on NumTraits::Real with T==void and SFINAE. 
+template <> +struct NumTraits {}; + +template <> +struct NumTraits : GenericNumTraits {}; + +} // end namespace Eigen + +#endif // EIGEN_NUMTRAITS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PartialReduxEvaluator.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PartialReduxEvaluator.h new file mode 100644 index 0000000000000000000000000000000000000000..7b2c8dca38f1ac941819e1d5f1b50ba91f63c4bf --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PartialReduxEvaluator.h @@ -0,0 +1,209 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2018 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PARTIALREDUX_H +#define EIGEN_PARTIALREDUX_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +/*************************************************************************** + * + * This file provides evaluators for partial reductions. + * There are two modes: + * + * - scalar path: simply calls the respective function on the column or row. + * -> nothing special here, all the tricky part is handled by the return + * types of VectorwiseOp's members. They embed the functor calling the + * respective DenseBase's member function. + * + * - vectorized path: implements a packet-wise reductions followed by + * some (optional) processing of the outcome, e.g., division by n for mean. + * + * For the vectorized path let's observe that the packet-size and outer-unrolling + * are both decided by the assignment logic. So all we have to do is to decide + * on the inner unrolling. + * + * For the unrolling, we can reuse "internal::redux_vec_unroller" from Redux.h, + * but be need to be careful to specify correct increment. + * + ***************************************************************************/ + +/* logic deciding a strategy for unrolling of vectorized paths */ +template +struct packetwise_redux_traits { + enum { + OuterSize = int(Evaluator::IsRowMajor) ? Evaluator::RowsAtCompileTime : Evaluator::ColsAtCompileTime, + Cost = OuterSize == Dynamic ? HugeCost + : OuterSize * Evaluator::CoeffReadCost + (OuterSize - 1) * functor_traits::Cost, + Unrolling = Cost <= EIGEN_UNROLLING_LIMIT ? CompleteUnrolling : NoUnrolling + }; +}; + +/* Value to be returned when size==0 , by default let's return 0 */ +template +EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const Func&) { + const typename unpacket_traits::type zero(0); + return pset1(zero); +} + +/* For products the default is 1 */ +template +EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const scalar_product_op&) { + return pset1(Scalar(1)); +} + +/* Perform the actual reduction */ +template ::Unrolling> +struct packetwise_redux_impl; + +/* Perform the actual reduction with unrolling */ +template +struct packetwise_redux_impl { + typedef redux_novec_unroller Base; + typedef typename Evaluator::Scalar Scalar; + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func, Index /*size*/) { + return redux_vec_unroller::OuterSize>::template run(eval, + func); + } +}; + +/* Add a specialization of redux_vec_unroller for size==0 at compiletime. 
+ * This specialization is not required for general reductions, which is + * why it is defined here. + */ +template +struct redux_vec_unroller { + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator&, const Func& f) { + return packetwise_redux_empty_value(f); + } +}; + +/* Perform the actual reduction for dynamic sizes */ +template +struct packetwise_redux_impl { + typedef typename Evaluator::Scalar Scalar; + typedef typename redux_traits::PacketType PacketScalar; + + template + EIGEN_DEVICE_FUNC static PacketType run(const Evaluator& eval, const Func& func, Index size) { + if (size == 0) return packetwise_redux_empty_value(func); + + const Index size4 = (size - 1) & (~3); + PacketType p = eval.template packetByOuterInner(0, 0); + Index i = 1; + // This loop is optimized for instruction pipelining: + // - each iteration generates two independent instructions + // - thanks to branch prediction and out-of-order execution we have independent instructions across loops + for (; i < size4; i += 4) + p = func.packetOp( + p, func.packetOp(func.packetOp(eval.template packetByOuterInner(i + 0, 0), + eval.template packetByOuterInner(i + 1, 0)), + func.packetOp(eval.template packetByOuterInner(i + 2, 0), + eval.template packetByOuterInner(i + 3, 0)))); + for (; i < size; ++i) p = func.packetOp(p, eval.template packetByOuterInner(i, 0)); + return p; + } +}; + +template +struct evaluator > + : evaluator_base > { + typedef PartialReduxExpr XprType; + typedef typename internal::nested_eval::type ArgTypeNested; + typedef add_const_on_value_type_t ConstArgTypeNested; + typedef internal::remove_all_t ArgTypeNestedCleaned; + typedef typename ArgType::Scalar InputScalar; + typedef typename XprType::Scalar Scalar; + enum { + TraversalSize = Direction == int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime) + }; + typedef typename MemberOp::template Cost CostOpType; + enum { + CoeffReadCost = TraversalSize == Dynamic ? HugeCost + : TraversalSize == 0 + ? 1 + : int(TraversalSize) * int(evaluator::CoeffReadCost) + int(CostOpType::value), + + ArgFlags_ = evaluator::Flags, + + Vectorizable_ = bool(int(ArgFlags_) & PacketAccessBit) && bool(MemberOp::Vectorizable) && + (Direction == int(Vertical) ? bool(ArgFlags_ & RowMajorBit) : (ArgFlags_ & RowMajorBit) == 0) && + (TraversalSize != 0), + + Flags = (traits::Flags & RowMajorBit) | (evaluator::Flags & (HereditaryBits & (~RowMajorBit))) | + (Vectorizable_ ? PacketAccessBit : 0) | LinearAccessBit, + + Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized + }; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr) : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) { + EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize == Dynamic ? HugeCost + : (TraversalSize == 0 ? 1 : int(CostOpType::value))); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const { + return coeff(Direction == Vertical ? j : i); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { + return m_functor(m_arg.template subVector(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index i, Index j) const { + return packet(Direction == Vertical ? 
j : i); + } + + template + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC PacketType packet(Index idx) const { + enum { PacketSize = internal::unpacket_traits::size }; + typedef Block + PanelType; + + PanelType panel(m_arg, Direction == Vertical ? 0 : idx, Direction == Vertical ? idx : 0, + Direction == Vertical ? m_arg.rows() : Index(PacketSize), + Direction == Vertical ? Index(PacketSize) : m_arg.cols()); + + // FIXME + // See bug 1612, currently if PacketSize==1 (i.e. complex with 128bits registers) then the storage-order of + // panel get reversed and methods like packetByOuterInner do not make sense anymore in this context. So let's just + // by pass "vectorization" in this case: + if (PacketSize == 1) return internal::pset1(coeff(idx)); + + typedef typename internal::redux_evaluator PanelEvaluator; + PanelEvaluator panel_eval(panel); + typedef typename MemberOp::BinaryOp BinaryOp; + PacketType p = internal::packetwise_redux_impl::template run( + panel_eval, m_functor.binaryFunc(), m_arg.outerSize()); + return p; + } + + protected: + ConstArgTypeNested m_arg; + const MemberOp m_functor; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PARTIALREDUX_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PermutationMatrix.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PermutationMatrix.h new file mode 100644 index 0000000000000000000000000000000000000000..4748b118a688162e5b9c6ff02d537672c05e806d --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PermutationMatrix.h @@ -0,0 +1,552 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009-2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PERMUTATIONMATRIX_H +#define EIGEN_PERMUTATIONMATRIX_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +enum PermPermProduct_t { PermPermProduct }; + +} // end namespace internal + +/** \class PermutationBase + * \ingroup Core_Module + * + * \brief Base class for permutations + * + * \tparam Derived the derived class + * + * This class is the base class for all expressions representing a permutation matrix, + * internally stored as a vector of integers. + * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix + * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have: + * \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f] + * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have: + * \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f] + * + * Permutation matrices are square and invertible. + * + * Notice that in addition to the member functions and operators listed here, there also are non-member + * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase) + * on either side. 
+ * + * \sa class PermutationMatrix, class PermutationWrapper + */ +template +class PermutationBase : public EigenBase { + typedef internal::traits Traits; + typedef EigenBase Base; + + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + enum { + Flags = Traits::Flags, + RowsAtCompileTime = Traits::RowsAtCompileTime, + ColsAtCompileTime = Traits::ColsAtCompileTime, + MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Traits::MaxColsAtCompileTime + }; + typedef typename Traits::StorageIndex StorageIndex; + typedef Matrix + DenseMatrixType; + typedef PermutationMatrix + PlainPermutationType; + typedef PlainPermutationType PlainObject; + using Base::derived; + typedef Inverse InverseReturnType; + typedef void Scalar; +#endif + + /** Copies the other permutation into *this */ + template + Derived& operator=(const PermutationBase& other) { + indices() = other.indices(); + return derived(); + } + + /** Assignment from the Transpositions \a tr */ + template + Derived& operator=(const TranspositionsBase& tr) { + setIdentity(tr.size()); + for (Index k = size() - 1; k >= 0; --k) applyTranspositionOnTheRight(k, tr.coeff(k)); + return derived(); + } + + /** \returns the number of rows */ + inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); } + + /** \returns the number of columns */ + inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); } + + /** \returns the size of a side of the respective square matrix, i.e., the number of indices */ + inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const { + other.setZero(); + for (Index i = 0; i < rows(); ++i) other.coeffRef(indices().coeff(i), i) = typename DenseDerived::Scalar(1); + } +#endif + + /** \returns a Matrix object initialized from this permutation matrix. Notice that it + * is inefficient to return this Matrix object by value. For efficiency, favor using + * the Matrix constructor taking EigenBase objects. + */ + DenseMatrixType toDenseMatrix() const { return derived(); } + + /** const version of indices(). */ + const IndicesType& indices() const { return derived().indices(); } + /** \returns a reference to the stored array representing the permutation. */ + IndicesType& indices() { return derived().indices(); } + + /** Resizes to given size. + */ + inline void resize(Index newSize) { indices().resize(newSize); } + + /** Sets *this to be the identity permutation matrix */ + void setIdentity() { + StorageIndex n = StorageIndex(size()); + for (StorageIndex i = 0; i < n; ++i) indices().coeffRef(i) = i; + } + + /** Sets *this to be the identity permutation matrix of given size. + */ + void setIdentity(Index newSize) { + resize(newSize); + setIdentity(); + } + + /** Multiplies *this by the transposition \f$(ij)\f$ on the left. + * + * \returns a reference to *this. + * + * \warning This is much slower than applyTranspositionOnTheRight(Index,Index): + * this has linear complexity and requires a lot of branching. 
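+ *
+ * A minimal sketch of the cost difference (illustrative only):
+ * \code
+ * PermutationMatrix<4> P;
+ * P.setIdentity();
+ * P.applyTranspositionOnTheRight(1, 3);  // fast: swaps two stored indices
+ * P.applyTranspositionOnTheLeft(0, 2);   // linear: scans the whole index array
+ * \endcode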
+ * + * \sa applyTranspositionOnTheRight(Index,Index) + */ + Derived& applyTranspositionOnTheLeft(Index i, Index j) { + eigen_assert(i >= 0 && j >= 0 && i < size() && j < size()); + for (Index k = 0; k < size(); ++k) { + if (indices().coeff(k) == i) + indices().coeffRef(k) = StorageIndex(j); + else if (indices().coeff(k) == j) + indices().coeffRef(k) = StorageIndex(i); + } + return derived(); + } + + /** Multiplies *this by the transposition \f$(ij)\f$ on the right. + * + * \returns a reference to *this. + * + * This is a fast operation, it only consists in swapping two indices. + * + * \sa applyTranspositionOnTheLeft(Index,Index) + */ + Derived& applyTranspositionOnTheRight(Index i, Index j) { + eigen_assert(i >= 0 && j >= 0 && i < size() && j < size()); + std::swap(indices().coeffRef(i), indices().coeffRef(j)); + return derived(); + } + + /** \returns the inverse permutation matrix. + * + * \note \blank \note_try_to_help_rvo + */ + inline InverseReturnType inverse() const { return InverseReturnType(derived()); } + /** \returns the transpose permutation matrix. + * + * \note \blank \note_try_to_help_rvo + */ + inline InverseReturnType transpose() const { return InverseReturnType(derived()); } + + /**** multiplication helpers to hopefully get RVO ****/ + +#ifndef EIGEN_PARSED_BY_DOXYGEN + protected: + template + void assignTranspose(const PermutationBase& other) { + for (Index i = 0; i < rows(); ++i) indices().coeffRef(other.indices().coeff(i)) = i; + } + template + void assignProduct(const Lhs& lhs, const Rhs& rhs) { + eigen_assert(lhs.cols() == rhs.rows()); + for (Index i = 0; i < rows(); ++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i)); + } +#endif + + public: + /** \returns the product permutation matrix. + * + * \note \blank \note_try_to_help_rvo + */ + template + inline PlainPermutationType operator*(const PermutationBase& other) const { + return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); + } + + /** \returns the product of a permutation with another inverse permutation. + * + * \note \blank \note_try_to_help_rvo + */ + template + inline PlainPermutationType operator*(const InverseImpl& other) const { + return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); + } + + /** \returns the product of an inverse permutation with another permutation. + * + * \note \blank \note_try_to_help_rvo + */ + template + friend inline PlainPermutationType operator*(const InverseImpl& other, + const PermutationBase& perm) { + return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); + } + + /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the + * permutation. + * + * This function is O(\c n) procedure allocating a buffer of \c n booleans. 
+ */ + Index determinant() const { + Index res = 1; + Index n = size(); + Matrix mask(n); + mask.fill(false); + Index r = 0; + while (r < n) { + // search for the next seed + while (r < n && mask[r]) r++; + if (r >= n) break; + // we got one, let's follow it until we are back to the seed + Index k0 = r++; + mask.coeffRef(k0) = true; + for (Index k = indices().coeff(k0); k != k0; k = indices().coeff(k)) { + mask.coeffRef(k) = true; + res = -res; + } + } + return res; + } + + protected: +}; + +namespace internal { +template +struct traits > + : traits< + Matrix > { + typedef PermutationStorage StorageKind; + typedef Matrix IndicesType; + typedef StorageIndex_ StorageIndex; + typedef void Scalar; +}; +} // namespace internal + +/** \class PermutationMatrix + * \ingroup Core_Module + * + * \brief Permutation matrix + * + * \tparam SizeAtCompileTime the number of rows/cols, or Dynamic + * \tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to + * SizeAtCompileTime. Most of the time, you should not have to specify it. \tparam StorageIndex_ the integer type of the + * indices + * + * This class represents a permutation matrix, internally stored as a vector of integers. + * + * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix + */ +template +class PermutationMatrix + : public PermutationBase > { + typedef PermutationBase Base; + typedef internal::traits Traits; + + public: + typedef const PermutationMatrix& Nested; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + typedef typename Traits::StorageIndex StorageIndex; +#endif + + inline PermutationMatrix() {} + + /** Constructs an uninitialized permutation matrix of given size. + */ + explicit inline PermutationMatrix(Index size) : m_indices(size) { + eigen_internal_assert(size <= NumTraits::highest()); + } + + /** Copy constructor. */ + template + inline PermutationMatrix(const PermutationBase& other) : m_indices(other.indices()) {} + + /** Generic constructor from expression of the indices. The indices + * array has the meaning that the permutations sends each integer i to indices[i]. + * + * \warning It is your responsibility to check that the indices array that you passes actually + * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the + * array's size. + */ + template + explicit inline PermutationMatrix(const MatrixBase& indices) : m_indices(indices) {} + + /** Convert the Transpositions \a tr to a permutation matrix */ + template + explicit PermutationMatrix(const TranspositionsBase& tr) : m_indices(tr.size()) { + *this = tr; + } + + /** Copies the other permutation into *this */ + template + PermutationMatrix& operator=(const PermutationBase& other) { + m_indices = other.indices(); + return *this; + } + + /** Assignment from the Transpositions \a tr */ + template + PermutationMatrix& operator=(const TranspositionsBase& tr) { + return Base::operator=(tr.derived()); + } + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. 
*/ + IndicesType& indices() { return m_indices; } + + /**** multiplication helpers to hopefully get RVO ****/ + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + PermutationMatrix(const InverseImpl& other) + : m_indices(other.derived().nestedExpression().size()) { + eigen_internal_assert(m_indices.size() <= NumTraits::highest()); + StorageIndex end = StorageIndex(m_indices.size()); + for (StorageIndex i = 0; i < end; ++i) + m_indices.coeffRef(other.derived().nestedExpression().indices().coeff(i)) = i; + } + template + PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) : m_indices(lhs.indices().size()) { + Base::assignProduct(lhs, rhs); + } +#endif + + protected: + IndicesType m_indices; +}; + +namespace internal { +template +struct traits, PacketAccess_> > + : traits< + Matrix > { + typedef PermutationStorage StorageKind; + typedef Map, PacketAccess_> IndicesType; + typedef StorageIndex_ StorageIndex; + typedef void Scalar; +}; +} // namespace internal + +template +class Map, PacketAccess_> + : public PermutationBase< + Map, PacketAccess_> > { + typedef PermutationBase Base; + typedef internal::traits Traits; + + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar StorageIndex; +#endif + + inline Map(const StorageIndex* indicesPtr) : m_indices(indicesPtr) {} + + inline Map(const StorageIndex* indicesPtr, Index size) : m_indices(indicesPtr, size) {} + + /** Copies the other permutation into *this */ + template + Map& operator=(const PermutationBase& other) { + return Base::operator=(other.derived()); + } + + /** Assignment from the Transpositions \a tr */ + template + Map& operator=(const TranspositionsBase& tr) { + return Base::operator=(tr.derived()); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Map& operator=(const Map& other) { + m_indices = other.m_indices; + return *this; + } +#endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. */ + IndicesType& indices() { return m_indices; } + + protected: + IndicesType m_indices; +}; + +template +class TranspositionsWrapper; +namespace internal { +template +struct traits > { + typedef PermutationStorage StorageKind; + typedef void Scalar; + typedef typename IndicesType_::Scalar StorageIndex; + typedef IndicesType_ IndicesType; + enum { + RowsAtCompileTime = IndicesType_::SizeAtCompileTime, + ColsAtCompileTime = IndicesType_::SizeAtCompileTime, + MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime, + MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime, + Flags = 0 + }; +}; +} // namespace internal + +/** \class PermutationWrapper + * \ingroup Core_Module + * + * \brief Class to view a vector of integers as a permutation matrix + * + * \tparam IndicesType_ the type of the vector of integer (can be any compatible expression) + * + * This class allows to view any vector expression of integers as a permutation matrix. 
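+ *
+ * A minimal usage sketch (illustrative only; asPermutation() is declared at the end of this file):
+ * \code
+ * Vector3i idx(1, 2, 0);                 // the permutation sending 0->1, 1->2, 2->0
+ * Matrix3d A = Matrix3d::Random();
+ * Matrix3d B = idx.asPermutation() * A;  // apply it to the rows of A
+ * \endcode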
+ * + * \sa class PermutationBase, class PermutationMatrix + */ +template +class PermutationWrapper : public PermutationBase > { + typedef PermutationBase Base; + typedef internal::traits Traits; + + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; +#endif + + inline PermutationWrapper(const IndicesType& indices) : m_indices(indices) {} + + /** const version of indices(). */ + const internal::remove_all_t& indices() const { return m_indices; } + + protected: + typename IndicesType::Nested m_indices; +}; + +/** \returns the matrix with the permutation applied to the columns. + */ +template +EIGEN_DEVICE_FUNC const Product operator*( + const MatrixBase& matrix, const PermutationBase& permutation) { + return Product(matrix.derived(), permutation.derived()); +} + +/** \returns the matrix with the permutation applied to the rows. + */ +template +EIGEN_DEVICE_FUNC const Product operator*( + const PermutationBase& permutation, const MatrixBase& matrix) { + return Product(permutation.derived(), matrix.derived()); +} + +template +class InverseImpl : public EigenBase > { + typedef typename PermutationType::PlainPermutationType PlainPermutationType; + typedef internal::traits PermTraits; + + protected: + InverseImpl() {} + + public: + typedef Inverse InverseType; + using EigenBase >::derived; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename PermutationType::DenseMatrixType DenseMatrixType; + enum { + RowsAtCompileTime = PermTraits::RowsAtCompileTime, + ColsAtCompileTime = PermTraits::ColsAtCompileTime, + MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime + }; +#endif + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const { + other.setZero(); + for (Index i = 0; i < derived().rows(); ++i) + other.coeffRef(i, derived().nestedExpression().indices().coeff(i)) = typename DenseDerived::Scalar(1); + } +#endif + + /** \return the equivalent permutation matrix */ + PlainPermutationType eval() const { return derived(); } + + DenseMatrixType toDenseMatrix() const { return derived(); } + + /** \returns the matrix with the inverse permutation applied to the columns. + */ + template + friend const Product operator*(const MatrixBase& matrix, + const InverseType& trPerm) { + return Product(matrix.derived(), trPerm.derived()); + } + + /** \returns the matrix with the inverse permutation applied to the rows. + */ + template + const Product operator*(const MatrixBase& matrix) const { + return Product(derived(), matrix.derived()); + } +}; + +template +const PermutationWrapper MatrixBase::asPermutation() const { + return derived(); +} + +namespace internal { + +template <> +struct AssignmentKind { + typedef EigenBase2EigenBase Kind; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PERMUTATIONMATRIX_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PlainObjectBase.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PlainObjectBase.h new file mode 100644 index 0000000000000000000000000000000000000000..22f132982ccd85b44c9417675870433869bb9438 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/PlainObjectBase.h @@ -0,0 +1,1037 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSESTORAGEBASE_H +#define EIGEN_DENSESTORAGEBASE_H + +#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO) +#define EIGEN_INITIALIZE_COEFFS +#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED \ + for (Index i = 0; i < base().size(); ++i) coeffRef(i) = Scalar(0); +#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN) +#define EIGEN_INITIALIZE_COEFFS +#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED \ + for (Index i = 0; i < base().size(); ++i) coeffRef(i) = std::numeric_limits::quiet_NaN(); +#else +#undef EIGEN_INITIALIZE_COEFFS +#define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED +#endif + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_NO_DEBUG +template +struct check_rows_cols_for_overflow { + EIGEN_STATIC_ASSERT(MaxRowsAtCompileTime* MaxColsAtCompileTime == MaxSizeAtCompileTime, + YOU MADE A PROGRAMMING MISTAKE) + template + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index, Index) {} +}; + +template +struct check_rows_cols_for_overflow { + template + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index, Index cols) { + constexpr Index MaxIndex = NumTraits::highest(); + bool error = cols > (MaxIndex / MaxRowsAtCompileTime); + if (error) throw_std_bad_alloc(); + } +}; + +template +struct check_rows_cols_for_overflow { + template + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index rows, Index) { + constexpr Index MaxIndex = NumTraits::highest(); + bool error = rows > (MaxIndex / MaxColsAtCompileTime); + if (error) throw_std_bad_alloc(); + } +}; + +template <> +struct check_rows_cols_for_overflow { + template + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE constexpr void run(Index rows, Index cols) { + constexpr Index MaxIndex = NumTraits::highest(); + bool error = cols == 0 ? false : (rows > (MaxIndex / cols)); + if (error) throw_std_bad_alloc(); + } +}; +#endif + +template +struct conservative_resize_like_impl; + +template +struct matrix_swap_impl; + +} // end namespace internal + +#ifdef EIGEN_PARSED_BY_DOXYGEN +namespace doxygen { + +// This is a workaround to doxygen not being able to understand the inheritance logic +// when it is hidden by the dense_xpr_base helper struct. +// Moreover, doxygen fails to include members that are not documented in the declaration body of +// MatrixBase if we inherits MatrixBase >, +// this is why we simply inherits MatrixBase, though this does not make sense. + +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher> : public MatrixBase {}; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher> : public ArrayBase {}; + +} // namespace doxygen + +/** \class PlainObjectBase + * \ingroup Core_Module + * \brief %Dense storage base class for matrices and arrays. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. 
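+ *
+ * A minimal sketch of the plugin mechanism (MyPlainObjectBasePlugin.h is a hypothetical user-provided header):
+ * \code
+ * // Must be defined before including any Eigen header; the file's content is
+ * // injected into the body of PlainObjectBase.
+ * #define EIGEN_PLAINOBJECTBASE_PLUGIN "MyPlainObjectBasePlugin.h"
+ * #include <Eigen/Core>
+ * \endcode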
+ * + * \tparam Derived is the derived type, e.g., a Matrix or Array + * + * \sa \ref TopicClassHierarchy + */ +template +class PlainObjectBase : public doxygen::dense_xpr_base_dispatcher +#else +template +class PlainObjectBase : public internal::dense_xpr_base::type +#endif +{ + public: + enum { Options = internal::traits::Options }; + typedef typename internal::dense_xpr_base::type Base; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Scalar Scalar; + + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef Derived DenseType; + + using Base::ColsAtCompileTime; + using Base::Flags; + using Base::IsVectorAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::RowsAtCompileTime; + using Base::SizeAtCompileTime; + + typedef Eigen::Map MapType; + typedef const Eigen::Map ConstMapType; + typedef Eigen::Map AlignedMapType; + typedef const Eigen::Map ConstAlignedMapType; + template + struct StridedMapType { + typedef Eigen::Map type; + }; + template + struct StridedConstMapType { + typedef Eigen::Map type; + }; + template + struct StridedAlignedMapType { + typedef Eigen::Map type; + }; + template + struct StridedConstAlignedMapType { + typedef Eigen::Map type; + }; + + protected: + DenseStorage m_storage; + + public: + enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits::Alignment > 0) }; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + + EIGEN_STATIC_ASSERT(internal::check_implication(MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1, + (int(Options) & RowMajor) == RowMajor), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT(internal::check_implication(MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1, + (int(Options) & RowMajor) == 0), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime == Dynamic), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT((MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime == Dynamic), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + EIGEN_STATIC_ASSERT(((Options & (DontAlign | RowMajor)) == Options), INVALID_MATRIX_TEMPLATE_PARAMETERS) + + EIGEN_DEVICE_FUNC Base& base() { return *static_cast(this); } + EIGEN_DEVICE_FUNC const Base& base() const { return *static_cast(this); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_storage.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_storage.cols(); } + + /** This is an overloaded version of DenseCoeffsBase::coeff(Index,Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeff(Index) const for details. 
*/ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeff(Index rowId, Index colId) const { + if (Flags & RowMajorBit) + return m_storage.data()[colId + rowId * m_storage.cols()]; + else // column-major + return m_storage.data()[rowId + colId * m_storage.rows()]; + } + + /** This is an overloaded version of DenseCoeffsBase::coeff(Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeff(Index) const for details. */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeff(Index index) const { + return m_storage.data()[index]; + } + + /** This is an overloaded version of DenseCoeffsBase::coeffRef(Index,Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeffRef(Index,Index) const for details. */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index rowId, Index colId) { + if (Flags & RowMajorBit) + return m_storage.data()[colId + rowId * m_storage.cols()]; + else // column-major + return m_storage.data()[rowId + colId * m_storage.rows()]; + } + + /** This is an overloaded version of DenseCoeffsBase::coeffRef(Index) const + * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. + * + * See DenseCoeffsBase::coeffRef(Index) const for details. */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index index) { return m_storage.data()[index]; } + + /** This is the const version of coeffRef(Index,Index) which is thus synonym of coeff(Index,Index). + * It is provided for convenience. */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeffRef(Index rowId, Index colId) const { + if (Flags & RowMajorBit) + return m_storage.data()[colId + rowId * m_storage.cols()]; + else // column-major + return m_storage.data()[rowId + colId * m_storage.rows()]; + } + + /** This is the const version of coeffRef(Index) which is thus synonym of coeff(Index). + * It is provided for convenience. */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const Scalar& coeffRef(Index index) const { + return m_storage.data()[index]; + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { + return internal::ploadt( + m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows())); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { + return internal::ploadt(m_storage.data() + index); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) { + internal::pstoret( + m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()), + val); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) { + internal::pstoret(m_storage.data() + index, val); + } + + /** \returns a const pointer to the data array of this matrix */ + EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_storage.data(); } + + /** \returns a pointer to the data array of this matrix */ + EIGEN_DEVICE_FUNC constexpr Scalar* data() { return m_storage.data(); } + + /** Resizes \c *this to a \a rows x \a cols matrix. 
+ * + * This method is intended for dynamic-size matrices, although it is legal to call it on any + * matrix as long as fixed dimensions are left unchanged. If you only want to change the number + * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). + * + * If the current number of coefficients of \c *this exactly matches the + * product \a rows * \a cols, then no memory allocation is performed and + * the current values are left unchanged. In all other cases, including + * shrinking, the data is reallocated and all previous values are lost. + * + * Example: \include Matrix_resize_int_int.cpp + * Output: \verbinclude Matrix_resize_int_int.out + * + * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index rows, Index cols) { + eigen_assert(internal::check_implication(RowsAtCompileTime != Dynamic, rows == RowsAtCompileTime) && + internal::check_implication(ColsAtCompileTime != Dynamic, cols == ColsAtCompileTime) && + internal::check_implication(RowsAtCompileTime == Dynamic && MaxRowsAtCompileTime != Dynamic, + rows <= MaxRowsAtCompileTime) && + internal::check_implication(ColsAtCompileTime == Dynamic && MaxColsAtCompileTime != Dynamic, + cols <= MaxColsAtCompileTime) && + rows >= 0 && cols >= 0 && "Invalid sizes when resizing a matrix or array."); +#ifndef EIGEN_NO_DEBUG + internal::check_rows_cols_for_overflow::run(rows, + cols); +#endif +#ifdef EIGEN_INITIALIZE_COEFFS + Index size = rows * cols; + bool size_changed = size != this->size(); + m_storage.resize(size, rows, cols); + if (size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED +#else + m_storage.resize(rows * cols, rows, cols); +#endif + } + + /** Resizes \c *this to a vector of length \a size + * + * \only_for_vectors. This method does not work for + * partially dynamic matrices when the static dimension is anything other + * than 1. For example it will not work with Matrix. + * + * Example: \include Matrix_resize_int.cpp + * Output: \verbinclude Matrix_resize_int.out + * + * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) + */ + EIGEN_DEVICE_FUNC inline constexpr void resize(Index size) { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) + eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime == Dynamic || size <= MaxSizeAtCompileTime)) || + SizeAtCompileTime == size) && + size >= 0); +#ifdef EIGEN_INITIALIZE_COEFFS + bool size_changed = size != this->size(); +#endif + if (RowsAtCompileTime == 1) + m_storage.resize(size, 1, size); + else + m_storage.resize(size, size, 1); +#ifdef EIGEN_INITIALIZE_COEFFS + if (size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED +#endif + } + + /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the + * special value \c NoChange as in the example below. + * + * Example: \include Matrix_resize_NoChange_int.cpp + * Output: \verbinclude Matrix_resize_NoChange_int.out + * + * \sa resize(Index,Index) + */ + EIGEN_DEVICE_FUNC inline constexpr void resize(NoChange_t, Index cols) { resize(rows(), cols); } + + /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special + * value \c NoChange as in the example below. 
+   *
+   * Example: \include Matrix_resize_int_NoChange.cpp
+   * Output: \verbinclude Matrix_resize_int_NoChange.out
+   *
+   * \sa resize(Index,Index)
+   */
+  EIGEN_DEVICE_FUNC inline constexpr void resize(Index rows, NoChange_t) { resize(rows, cols()); }
+
+  /** Resizes \c *this to have the same dimensions as \a other.
+   * Takes care of doing all the checking that's needed.
+   *
+   * Note that copying a row-vector into a vector (and conversely) is allowed.
+   * The resizing, if any, is then done in the appropriate way so that row-vectors
+   * remain row-vectors and vectors remain vectors.
+   */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other) {
+    const OtherDerived& other = _other.derived();
+#ifndef EIGEN_NO_DEBUG
+    internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime>::run(
+        other.rows(), other.cols());
+#endif
+    const Index othersize = other.rows() * other.cols();
+    if (RowsAtCompileTime == 1) {
+      eigen_assert(other.rows() == 1 || other.cols() == 1);
+      resize(1, othersize);
+    } else if (ColsAtCompileTime == 1) {
+      eigen_assert(other.rows() == 1 || other.cols() == 1);
+      resize(othersize, 1);
+    } else
+      resize(other.rows(), other.cols());
+  }
+
+  /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+   *
+   * The method is intended for matrices of dynamic size. If you only want to change the number
+   * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+   * conservativeResize(Index, NoChange_t).
+   *
+   * Matrices are resized relative to the top-left element. In case values need to be
+   * appended to the matrix they will be uninitialized.
+   */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) {
+    internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);
+  }
+
+  /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+   *
+   * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+   * the number of columns unchanged.
+   *
+   * In case the matrix is growing, new rows will be uninitialized.
+   */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) {
+    // Note: see the comment in conservativeResize(Index,Index)
+    conservativeResize(rows, cols());
+  }
+
+  /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+   *
+   * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+   * the number of rows unchanged.
+   *
+   * In case the matrix is growing, new columns will be uninitialized.
+   */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) {
+    // Note: see the comment in conservativeResize(Index,Index)
+    conservativeResize(rows(), cols);
+  }
+
+  /** Resizes the vector to \a size while retaining old values.
+   *
+   * \only_for_vectors. This method does not work for
+   * partially dynamic matrices when the static dimension is anything other
+   * than 1. For example it will not work with Matrix<float, 2, Dynamic>.
+   *
+   * When values are appended, they will be uninitialized.
+   */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index size) {
+    internal::conservative_resize_like_impl<Derived>::run(*this, size);
+  }
+
+  /** Resizes the matrix to the \a rows x \a cols dimensions of \c other, while leaving old values untouched.
+   *
+   * The method is intended for matrices of dynamic size. If you only want to change the number
+   * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+   * conservativeResize(Index, NoChange_t).
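+   *
+   * For contrast with plain resize(), a minimal illustrative sketch (assuming a
+   * dynamic-size matrix):
+   * \code
+   * Eigen::MatrixXd m = Eigen::MatrixXd::Ones(2, 2);
+   * m.conservativeResize(3, 3);  // the original 2 x 2 block is kept,
+   *                              // the appended row and column are uninitialized
+   * \endcode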
+   *
+   * Matrices are resized relative to the top-left element. In case values need to be
+   * appended to the matrix they will be copied from \c other.
+   */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other) {
+    internal::conservative_resize_like_impl<Derived, OtherDerived>::run(*this, other);
+  }
+
+  /** This is a special case of the templated operator=. Its purpose is to
+   * prevent a default operator= from hiding the templated operator=.
+   */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Derived& operator=(const PlainObjectBase& other) {
+    return _set(other);
+  }
+
+  /** \sa MatrixBase::lazyAssign() */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other) {
+    _resize_to_match(other);
+    return Base::lazyAssign(other.derived());
+  }
+
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func) {
+    resize(func.rows(), func.cols());
+    return Base::operator=(func);
+  }
+
+  // Prevent users from trying to instantiate PlainObjectBase objects
+  // by making all its constructors protected. See bug 1074.
+ protected:
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr PlainObjectBase() = default;
+  /** \brief Move constructor */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr PlainObjectBase(PlainObjectBase&&) = default;
+  /** \brief Move assignment operator */
+  EIGEN_DEVICE_FUNC constexpr PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT {
+    m_storage = std::move(other.m_storage);
+    return *this;
+  }
+
+  /** Copy constructor */
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr PlainObjectBase(const PlainObjectBase&) = default;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)
+      : m_storage(size, rows, cols) {}
+
+  /** \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients.
+   *
+   * \only_for_vectors
+   *
+   * This constructor is for 1D arrays or vectors with more than 4 coefficients.
+   *
+   * \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
+   * constructor must match the fixed number of rows (resp. columns) of \c *this.
+   */
+  template <typename... ArgTypes>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2,
+                                                        const Scalar& a3, const ArgTypes&...
args) + : m_storage() { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, sizeof...(args) + 4); + m_storage.data()[0] = a0; + m_storage.data()[1] = a1; + m_storage.data()[2] = a2; + m_storage.data()[3] = a3; + Index i = 4; + auto x = {(m_storage.data()[i++] = args, 0)...}; + static_cast(x); + } + + /** \brief Constructs a Matrix or Array and initializes it by elements given by an initializer list of initializer + * lists + */ + EIGEN_DEVICE_FUNC explicit constexpr EIGEN_STRONG_INLINE PlainObjectBase( + const std::initializer_list>& list) + : m_storage() { + size_t list_size = 0; + if (list.begin() != list.end()) { + list_size = list.begin()->size(); + } + + // This is to allow syntax like VectorXi {{1, 2, 3, 4}} + if (ColsAtCompileTime == 1 && list.size() == 1) { + eigen_assert(list_size == static_cast(RowsAtCompileTime) || RowsAtCompileTime == Dynamic); + resize(list_size, ColsAtCompileTime); + if (list.begin()->begin() != nullptr) { + std::copy(list.begin()->begin(), list.begin()->end(), m_storage.data()); + } + } else { + eigen_assert(list.size() == static_cast(RowsAtCompileTime) || RowsAtCompileTime == Dynamic); + eigen_assert(list_size == static_cast(ColsAtCompileTime) || ColsAtCompileTime == Dynamic); + resize(list.size(), list_size); + + Index row_index = 0; + for (const std::initializer_list& row : list) { + eigen_assert(list_size == row.size()); + Index col_index = 0; + for (const Scalar& e : row) { + coeffRef(row_index, col_index) = e; + ++col_index; + } + ++row_index; + } + } + } + + /** \sa PlainObjectBase::operator=(const EigenBase&) */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase& other) : m_storage() { + resizeLike(other); + _set_noalias(other); + } + + /** \sa PlainObjectBase::operator=(const EigenBase&) */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase& other) : m_storage() { + resizeLike(other); + *this = other.derived(); + } + /** \brief Copy constructor with in-place evaluation */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const ReturnByValue& other) { + // FIXME this does not automatically transpose vectors if necessary + resize(other.rows(), other.cols()); + other.evalTo(this->derived()); + } + + public: + /** \brief Copies the generic expression \a other into *this. + * \copydetails DenseBase::operator=(const EigenBase &other) + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase& other) { + _resize_to_match(other); + Base::operator=(other.derived()); + return this->derived(); + } + + /** \name Map + * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, + * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned + * \a data pointers. 
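+   *
+   * A minimal sketch of the unaligned variants (illustrative only; assumes \c buf
+   * holds at least six doubles):
+   * \code
+   * double buf[6] = {1, 2, 3, 4, 5, 6};
+   * auto v = Eigen::VectorXd::Map(buf, 6);           // dynamic-size vector view, no copy
+   * auto m = Eigen::Matrix<double, 2, 3>::Map(buf);  // fixed-size 2 x 3 view of the same data
+   * \endcode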
+ * + * Here is an example using strides: + * \include Matrix_Map_stride.cpp + * Output: \verbinclude Matrix_Map_stride.out + * + * \see class Map + */ + ///@{ + static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); } + static inline MapType Map(Scalar* data) { return MapType(data); } + static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); } + static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); } + static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); } + static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); } + + static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); } + static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } + static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) { + return ConstAlignedMapType(data, size); + } + static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } + static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { + return ConstAlignedMapType(data, rows, cols); + } + static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { + return AlignedMapType(data, rows, cols); + } + + template + static inline typename StridedConstMapType>::type Map(const Scalar* data, + const Stride& stride) { + return typename StridedConstMapType>::type(data, stride); + } + template + static inline typename StridedMapType>::type Map(Scalar* data, + const Stride& stride) { + return typename StridedMapType>::type(data, stride); + } + template + static inline typename StridedConstMapType>::type Map(const Scalar* data, Index size, + const Stride& stride) { + return typename StridedConstMapType>::type(data, size, stride); + } + template + static inline typename StridedMapType>::type Map(Scalar* data, Index size, + const Stride& stride) { + return typename StridedMapType>::type(data, size, stride); + } + template + static inline typename StridedConstMapType>::type Map(const Scalar* data, Index rows, Index cols, + const Stride& stride) { + return typename StridedConstMapType>::type(data, rows, cols, stride); + } + template + static inline typename StridedMapType>::type Map(Scalar* data, Index rows, Index cols, + const Stride& stride) { + return typename StridedMapType>::type(data, rows, cols, stride); + } + + template + static inline typename StridedConstAlignedMapType>::type MapAligned( + const Scalar* data, const Stride& stride) { + return typename StridedConstAlignedMapType>::type(data, stride); + } + template + static inline typename StridedAlignedMapType>::type MapAligned( + Scalar* data, const Stride& stride) { + return typename StridedAlignedMapType>::type(data, stride); + } + template + static inline typename StridedConstAlignedMapType>::type MapAligned( + const Scalar* data, Index size, const Stride& stride) { + return typename StridedConstAlignedMapType>::type(data, size, stride); + } + template + static inline typename StridedAlignedMapType>::type MapAligned( + Scalar* data, Index size, const Stride& stride) { + return typename StridedAlignedMapType>::type(data, size, stride); + } + template + static inline typename StridedConstAlignedMapType>::type MapAligned( + const Scalar* data, Index rows, Index cols, const Stride& stride) { + return typename 
StridedConstAlignedMapType<Stride<Outer, Inner>>::type(data, rows, cols, stride);
+  }
+  template <int Outer, int Inner>
+  static inline typename StridedAlignedMapType<Stride<Outer, Inner>>::type MapAligned(
+      Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) {
+    return typename StridedAlignedMapType<Stride<Outer, Inner>>::type(data, rows, cols, stride);
+  }
+  ///@}
+
+  using Base::setConstant;
+  EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val);
+  EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val);
+  EIGEN_DEVICE_FUNC Derived& setConstant(NoChange_t, Index cols, const Scalar& val);
+  EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, NoChange_t, const Scalar& val);
+
+  using Base::setZero;
+  EIGEN_DEVICE_FUNC Derived& setZero(Index size);
+  EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols);
+  EIGEN_DEVICE_FUNC Derived& setZero(NoChange_t, Index cols);
+  EIGEN_DEVICE_FUNC Derived& setZero(Index rows, NoChange_t);
+
+  using Base::setOnes;
+  EIGEN_DEVICE_FUNC Derived& setOnes(Index size);
+  EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols);
+  EIGEN_DEVICE_FUNC Derived& setOnes(NoChange_t, Index cols);
+  EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, NoChange_t);
+
+  using Base::setRandom;
+  Derived& setRandom(Index size);
+  Derived& setRandom(Index rows, Index cols);
+  Derived& setRandom(NoChange_t, Index cols);
+  Derived& setRandom(Index rows, NoChange_t);
+
+#ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
+#include EIGEN_PLAINOBJECTBASE_PLUGIN
+#endif
+
+ protected:
+  /** \internal Resizes *this in preparation for assigning \a other to it.
+   * Takes care of doing all the checking that's needed.
+   *
+   * Note that copying a row-vector into a vector (and conversely) is allowed.
+   * The resizing, if any, is then done in the appropriate way so that row-vectors
+   * remain row-vectors and vectors remain vectors.
+   */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other) {
+#ifdef EIGEN_NO_AUTOMATIC_RESIZING
+    eigen_assert((this->size() == 0 || (IsVectorAtCompileTime ? (this->size() == other.size())
+                                                              : (rows() == other.rows() && cols() == other.cols()))) &&
+                 "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+    EIGEN_ONLY_USED_FOR_DEBUG(other);
+#else
+    resizeLike(other);
+#endif
+  }
+
+  /**
+   * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+   *
+   * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+   * it will be initialized.
+   *
+   * Note that copying a row-vector into a vector (and conversely) is allowed.
+   * The resizing, if any, is then done in the appropriate way so that row-vectors
+   * remain row-vectors and vectors remain vectors.
+   *
+   * \sa operator=(const MatrixBase&), _set_noalias()
+   *
+   * \internal
+   */
+  // aliasing is dealt with once in internal::call_assignment,
+  // so at this stage we have to assume aliasing... and resizing has to be done later.
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Derived& _set(const DenseBase<OtherDerived>& other) {
+    internal::call_assignment(this->derived(), other.derived());
+    return this->derived();
+  }
+
+  /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
+   * is the case when creating a new matrix) so one can enforce lazy evaluation.
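+   *
+   * The user-facing counterpart of this assumption is \c noalias(); a minimal
+   * illustrative sketch (with assumed matrices \c a, \c b, \c c):
+   * \code
+   * c.noalias() = a * b;  // caller guarantees c does not alias a or b,
+   *                       // so the result can be written directly into c
+   * \endcode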
+   *
+   * \sa operator=(const MatrixBase&), _set()
+   */
+  template <typename OtherDerived>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Derived& _set_noalias(const DenseBase<OtherDerived>& other) {
+    // I don't think we need this resize call since the lazyAssign will anyways resize
+    // and lazyAssign will be called by the assign selector.
+    //_resize_to_match(other);
+    // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
+    // it wouldn't allow us to copy a row-vector into a column-vector.
+    internal::call_assignment_no_alias(this->derived(), other.derived(),
+                                       internal::assign_op<Scalar, typename OtherDerived::Scalar>());
+    return this->derived();
+  }
+
+  template <typename T0, typename T1>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(Index rows, Index cols,
+                                                    std::enable_if_t<Base::SizeAtCompileTime != 2, T0>* = 0) {
+    EIGEN_STATIC_ASSERT(internal::is_valid_index_type<T0>::value && internal::is_valid_index_type<T1>::value,
+                        T0 AND T1 MUST BE INTEGER TYPES)
+    resize(rows, cols);
+  }
+
+  template <typename T0, typename T1>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1,
+                                                    std::enable_if_t<Base::SizeAtCompileTime == 2, T0>* = 0) {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+    m_storage.data()[0] = Scalar(val0);
+    m_storage.data()[1] = Scalar(val1);
+  }
+
+  template <typename T0, typename T1>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(
+      const Index& val0, const Index& val1,
+      std::enable_if_t<(!internal::is_same<Index, Scalar>::value) && (internal::is_same<T0, Index>::value) &&
+                           (internal::is_same<T1, Index>::value) && Base::SizeAtCompileTime == 2,
+                       T1>* = 0) {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+    m_storage.data()[0] = Scalar(val0);
+    m_storage.data()[1] = Scalar(val1);
+  }
+
+  // If the argument is convertible to the Index type and we either have a non-1x1 Matrix or a dynamic-sized Array,
+  // then the argument is meant to be the size of the object.
+  template <typename T>
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(
+      Index size,
+      std::enable_if_t<(Base::SizeAtCompileTime != 1 || !internal::is_convertible<T, Scalar>::value) &&
+                           ((!internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value ||
+                             Base::SizeAtCompileTime == Dynamic)),
+                       T>* = 0) {
+    // NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.
+ const bool is_integer_alike = internal::is_valid_index_type::value; + EIGEN_UNUSED_VARIABLE(is_integer_alike); + EIGEN_STATIC_ASSERT(is_integer_alike, FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) + resize(size); + } + + // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar + // type can be implicitly converted) + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1( + const Scalar& val0, + std::enable_if_t::value, T>* = 0) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1) + m_storage.data()[0] = val0; + } + + // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar + // type match the index type) + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1( + const Index& val0, + std::enable_if_t<(!internal::is_same::value) && (internal::is_same::value) && + Base::SizeAtCompileTime == 1 && internal::is_convertible::value, + T*>* = 0) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1) + m_storage.data()[0] = Scalar(val0); + } + + // Initialize a fixed size matrix from a pointer to raw data + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar* data) { + this->_set_noalias(ConstMapType(data)); + } + + // Initialize an arbitrary matrix from a dense expression + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const DenseBase& other) { + this->_set_noalias(other); + } + + // Initialize an arbitrary matrix from an object convertible to the Derived type. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Derived& other) { + this->_set_noalias(other); + } + + // Initialize an arbitrary matrix from a generic Eigen expression + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const EigenBase& other) { + this->derived() = other; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const ReturnByValue& other) { + resize(other.rows(), other.cols()); + other.evalTo(this->derived()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const RotationBase& r) { + this->derived() = r; + } + + // For fixed-size Array + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1( + const Scalar& val0, + std::enable_if_t::value && + internal::is_same::XprKind, ArrayXpr>::value, + T>* = 0) { + Base::setConstant(val0); + } + + // For fixed-size Array + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1( + const Index& val0, + std::enable_if_t<(!internal::is_same::value) && (internal::is_same::value) && + Base::SizeAtCompileTime != Dynamic && Base::SizeAtCompileTime != 1 && + internal::is_convertible::value && + internal::is_same::XprKind, ArrayXpr>::value, + T*>* = 0) { + Base::setConstant(val0); + } + + template + friend struct internal::matrix_swap_impl; + + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal + * \brief Override DenseBase::swap() since for dynamic-sized matrices + * of same type it is enough to swap the data pointers. 
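+   *
+   * Illustrative consequence: for two heap-allocated matrices of the same type,
+   * \code a.swap(b); \endcode runs in O(1), exchanging only the storage pointers
+   * and run-time sizes; no coefficients are copied.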
+ */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(DenseBase& other) { + enum {SwapPointers = internal::is_same::value && Base::SizeAtCompileTime == Dynamic}; + internal::matrix_swap_impl::run(this->derived(), other.derived()); + } + + /** \internal + * \brief const version forwarded to DenseBase::swap + */ + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(DenseBase const& other) { + Base::swap(other.derived()); + } + + enum {IsPlainObjectBase = 1}; +#endif + public: + // These apparently need to be down here for nvcc+icc to prevent duplicate + // Map symbol. + template + friend class Eigen::Map; + friend class Eigen::Map; + friend class Eigen::Map; +#if EIGEN_MAX_ALIGN_BYTES > 0 + // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class + // twice. + friend class Eigen::Map; + friend class Eigen::Map; +#endif +}; + +namespace internal { + +template +struct conservative_resize_like_impl { + static constexpr bool IsRelocatable = std::is_trivially_copyable::value; + static void run(DenseBase& _this, Index rows, Index cols) { + if (_this.rows() == rows && _this.cols() == cols) return; + EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) + + if (IsRelocatable && + ((Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows + (!Derived::IsRowMajor && _this.rows() == rows))) // column-major and we change only the number of columns + { +#ifndef EIGEN_NO_DEBUG + internal::check_rows_cols_for_overflow::run(rows, cols); +#endif + _this.derived().m_storage.conservativeResize(rows * cols, rows, cols); + } else { + // The storage order does not allow us to use reallocation. + Derived tmp(rows, cols); + const Index common_rows = numext::mini(rows, _this.rows()); + const Index common_cols = numext::mini(cols, _this.cols()); + tmp.block(0, 0, common_rows, common_cols) = _this.block(0, 0, common_rows, common_cols); + _this.derived().swap(tmp); + } + } + + static void run(DenseBase& _this, const DenseBase& other) { + if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; + + // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), + // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the + // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or + // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like + // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. + EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) + EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) + + if (IsRelocatable && + ((Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows + (!Derived::IsRowMajor && + _this.rows() == other.rows()))) // column-major and we change only the number of columns + { + const Index new_rows = other.rows() - _this.rows(); + const Index new_cols = other.cols() - _this.cols(); + _this.derived().m_storage.conservativeResize(other.size(), other.rows(), other.cols()); + if (new_rows > 0) + _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); + else if (new_cols > 0) + _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols); + } else { + // The storage order does not allow us to use reallocation. 
+ Derived tmp(other); + const Index common_rows = numext::mini(tmp.rows(), _this.rows()); + const Index common_cols = numext::mini(tmp.cols(), _this.cols()); + tmp.block(0, 0, common_rows, common_cols) = _this.block(0, 0, common_rows, common_cols); + _this.derived().swap(tmp); + } + } +}; + +// Here, the specialization for vectors inherits from the general matrix case +// to allow calling .conservativeResize(rows,cols) on vectors. +template +struct conservative_resize_like_impl + : conservative_resize_like_impl { + typedef conservative_resize_like_impl Base; + using Base::IsRelocatable; + using Base::run; + + static void run(DenseBase& _this, Index size) { + const Index new_rows = Derived::RowsAtCompileTime == 1 ? 1 : size; + const Index new_cols = Derived::RowsAtCompileTime == 1 ? size : 1; + if (IsRelocatable) + _this.derived().m_storage.conservativeResize(size, new_rows, new_cols); + else + Base::run(_this.derived(), new_rows, new_cols); + } + + static void run(DenseBase& _this, const DenseBase& other) { + if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; + + const Index num_new_elements = other.size() - _this.size(); + + const Index new_rows = Derived::RowsAtCompileTime == 1 ? 1 : other.rows(); + const Index new_cols = Derived::RowsAtCompileTime == 1 ? other.cols() : 1; + if (IsRelocatable) + _this.derived().m_storage.conservativeResize(other.size(), new_rows, new_cols); + else + Base::run(_this.derived(), new_rows, new_cols); + + if (num_new_elements > 0) _this.tail(num_new_elements) = other.tail(num_new_elements); + } +}; + +template +struct matrix_swap_impl { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(MatrixTypeA& a, MatrixTypeB& b) { a.base().swap(b); } +}; + +template +struct matrix_swap_impl { + EIGEN_DEVICE_FUNC static inline void run(MatrixTypeA& a, MatrixTypeB& b) { + static_cast(a).m_storage.swap(static_cast(b).m_storage); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DENSESTORAGEBASE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Product.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Product.h new file mode 100644 index 0000000000000000000000000000000000000000..37683e3c27269eb8c1ce179711da852ede7e8747 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Product.h @@ -0,0 +1,307 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PRODUCT_H +#define EIGEN_PRODUCT_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +template +class ProductImpl; + +namespace internal { + +template +struct traits> { + typedef remove_all_t LhsCleaned; + typedef remove_all_t RhsCleaned; + typedef traits LhsTraits; + typedef traits RhsTraits; + + typedef MatrixXpr XprKind; + + typedef typename ScalarBinaryOpTraits::Scalar, + typename traits::Scalar>::ReturnType Scalar; + typedef typename product_promote_storage_type::ret>::ret StorageKind; + typedef typename promote_index_type::type + StorageIndex; + + enum { + RowsAtCompileTime = LhsTraits::RowsAtCompileTime, + ColsAtCompileTime = RhsTraits::ColsAtCompileTime, + MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime, + + // FIXME: only needed by GeneralMatrixMatrixTriangular + InnerSize = min_size_prefer_fixed(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime), + + // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator. + Flags = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? RowMajorBit + : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0 + : (((LhsTraits::Flags & NoPreferredStorageOrderBit) && (RhsTraits::Flags & RowMajorBit)) || + ((RhsTraits::Flags & NoPreferredStorageOrderBit) && (LhsTraits::Flags & RowMajorBit))) + ? RowMajorBit + : NoPreferredStorageOrderBit + }; +}; + +struct TransposeProductEnum { + // convenience enumerations to specialize transposed products + enum : int { + Default = 0x00, + Matrix = 0x01, + Permutation = 0x02, + MatrixMatrix = (Matrix << 8) | Matrix, + MatrixPermutation = (Matrix << 8) | Permutation, + PermutationMatrix = (Permutation << 8) | Matrix + }; +}; +template +struct TransposeKind { + static constexpr int Kind = is_matrix_base_xpr::value ? TransposeProductEnum::Matrix + : is_permutation_base_xpr::value ? 
TransposeProductEnum::Permutation + : TransposeProductEnum::Default; +}; + +template +struct TransposeProductKind { + static constexpr int Kind = (TransposeKind::Kind << 8) | TransposeKind::Kind; +}; + +template ::Kind> +struct product_transpose_helper { + // by default, don't optimize the transposed product + using Derived = Product; + using Scalar = typename Derived::Scalar; + using TransposeType = Transpose; + using ConjugateTransposeType = CwiseUnaryOp, TransposeType>; + using AdjointType = std::conditional_t::IsComplex, ConjugateTransposeType, TransposeType>; + + // return (lhs * rhs)^T + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) { + return TransposeType(derived); + } + // return (lhs * rhs)^H + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) { + return AdjointType(TransposeType(derived)); + } +}; + +template +struct product_transpose_helper { + // expand the transposed matrix-matrix product + using Derived = Product; + + using LhsScalar = typename traits::Scalar; + using LhsTransposeType = typename DenseBase::ConstTransposeReturnType; + using LhsConjugateTransposeType = CwiseUnaryOp, LhsTransposeType>; + using LhsAdjointType = + std::conditional_t::IsComplex, LhsConjugateTransposeType, LhsTransposeType>; + + using RhsScalar = typename traits::Scalar; + using RhsTransposeType = typename DenseBase::ConstTransposeReturnType; + using RhsConjugateTransposeType = CwiseUnaryOp, RhsTransposeType>; + using RhsAdjointType = + std::conditional_t::IsComplex, RhsConjugateTransposeType, RhsTransposeType>; + + using TransposeType = Product; + using AdjointType = Product; + + // return rhs^T * lhs^T + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) { + return TransposeType(RhsTransposeType(derived.rhs()), LhsTransposeType(derived.lhs())); + } + // return rhs^H * lhs^H + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) { + return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())), + LhsAdjointType(LhsTransposeType(derived.lhs()))); + } +}; +template +struct product_transpose_helper { + // expand the transposed permutation-matrix product + using Derived = Product; + + using LhsInverseType = typename PermutationBase::InverseReturnType; + + using RhsScalar = typename traits::Scalar; + using RhsTransposeType = typename DenseBase::ConstTransposeReturnType; + using RhsConjugateTransposeType = CwiseUnaryOp, RhsTransposeType>; + using RhsAdjointType = + std::conditional_t::IsComplex, RhsConjugateTransposeType, RhsTransposeType>; + + using TransposeType = Product; + using AdjointType = Product; + + // return rhs^T * lhs^-1 + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) { + return TransposeType(RhsTransposeType(derived.rhs()), LhsInverseType(derived.lhs())); + } + // return rhs^H * lhs^-1 + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) { + return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())), LhsInverseType(derived.lhs())); + } +}; +template +struct product_transpose_helper { + // expand the transposed matrix-permutation product + using Derived = Product; + + using LhsScalar = typename traits::Scalar; + using LhsTransposeType = typename DenseBase::ConstTransposeReturnType; + using LhsConjugateTransposeType = CwiseUnaryOp, LhsTransposeType>; + using LhsAdjointType = + std::conditional_t::IsComplex, 
LhsConjugateTransposeType, LhsTransposeType>; + + using RhsInverseType = typename PermutationBase::InverseReturnType; + + using TransposeType = Product; + using AdjointType = Product; + + // return rhs^-1 * lhs^T + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) { + return TransposeType(RhsInverseType(derived.rhs()), LhsTransposeType(derived.lhs())); + } + // return rhs^-1 * lhs^H + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) { + return AdjointType(RhsInverseType(derived.rhs()), LhsAdjointType(LhsTransposeType(derived.lhs()))); + } +}; + +} // end namespace internal + +/** \class Product + * \ingroup Core_Module + * + * \brief Expression of the product of two arbitrary matrices or vectors + * + * \tparam Lhs_ the type of the left-hand side expression + * \tparam Rhs_ the type of the right-hand side expression + * + * This class represents an expression of the product of two arbitrary matrices. + * + * The other template parameters are: + * \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct + * + */ +template +class Product + : public ProductImpl::StorageKind, typename internal::traits::StorageKind, + internal::product_type::ret>::ret> { + public: + typedef Lhs_ Lhs; + typedef Rhs_ Rhs; + + typedef + typename ProductImpl::StorageKind, typename internal::traits::StorageKind, + internal::product_type::ret>::ret>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Product) + + typedef typename internal::ref_selector::type LhsNested; + typedef typename internal::ref_selector::type RhsNested; + typedef internal::remove_all_t LhsNestedCleaned; + typedef internal::remove_all_t RhsNestedCleaned; + + using TransposeReturnType = typename internal::product_transpose_helper::TransposeType; + using AdjointReturnType = typename internal::product_transpose_helper::AdjointType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { + eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" && + "if you wanted a coeff-wise or a dot product use the respective explicit functions"); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNestedCleaned& lhs() const { return m_lhs; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNestedCleaned& rhs() const { return m_rhs; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeReturnType transpose() const { + return internal::product_transpose_helper::run_transpose(*this); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointReturnType adjoint() const { + return internal::product_transpose_helper::run_adjoint(*this); + } + + protected: + LhsNested m_lhs; + RhsNested m_rhs; +}; + +namespace internal { + +template ::ret> +class dense_product_base : public internal::dense_xpr_base>::type {}; + +/** Conversion to scalar for inner-products */ +template +class dense_product_base + : public internal::dense_xpr_base>::type { + typedef Product ProductXpr; + typedef typename internal::dense_xpr_base::type Base; + + public: + using Base::derived; + typedef typename Base::Scalar Scalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const { + return internal::evaluator(derived()).coeff(0, 0); + } +}; + +} // namespace internal + +// Generic API dispatcher 
+template +class ProductImpl : public internal::generic_xpr_base, MatrixXpr, StorageKind>::type { + public: + typedef typename internal::generic_xpr_base, MatrixXpr, StorageKind>::type Base; +}; + +template +class ProductImpl : public internal::dense_product_base { + typedef Product Derived; + + public: + typedef typename internal::dense_product_base Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + protected: + enum { + IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) && + (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic), + EnableCoeff = IsOneByOne || Option == LazyProduct + }; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const { + EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS); + eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1)); + + return internal::evaluator(derived()).coeff(row, col); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const { + EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS); + eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1)); + + return internal::evaluator(derived()).coeff(i); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ProductEvaluators.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ProductEvaluators.h new file mode 100644 index 0000000000000000000000000000000000000000..77a658a8ef953cd4c01fce86b771ba0b0fca87d6 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ProductEvaluators.h @@ -0,0 +1,1156 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2011 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PRODUCTEVALUATORS_H +#define EIGEN_PRODUCTEVALUATORS_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +/** \internal + * Evaluator of a product expression. + * Since products require special treatments to handle all possible cases, + * we simply defer the evaluation logic to a product_evaluator class + * which offers more partial specialization possibilities. 
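+ *
+ * Illustrative sketch of the resulting laziness at the call site (assuming dense
+ * dynamic matrices A and B):
+ * \code
+ * Eigen::MatrixXd A(4, 4), B(4, 4);
+ * auto expr = A * B;         // only builds a Product<> expression; nothing is computed yet
+ * Eigen::MatrixXd C = expr;  // the assignment triggers this evaluator
+ * \endcode
+ * (Capturing the expression with \c auto is shown here only to make the deferral visible.)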
+ * + * \sa class product_evaluator + */ +template +struct evaluator> : public product_evaluator> { + typedef Product XprType; + typedef product_evaluator Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {} +}; + +// Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B" +// TODO we should apply that rule only if that's really helpful +template +struct evaluator_assume_aliasing, + const CwiseNullaryOp, Plain1>, + const Product>> { + static const bool value = true; +}; +template +struct evaluator, + const CwiseNullaryOp, Plain1>, + const Product>> + : public evaluator> { + typedef CwiseBinaryOp, + const CwiseNullaryOp, Plain1>, + const Product> + XprType; + typedef evaluator> Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) + : Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs()) {} +}; + +template +struct evaluator, DiagIndex>> + : public evaluator, DiagIndex>> { + typedef Diagonal, DiagIndex> XprType; + typedef evaluator, DiagIndex>> Base; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) + : Base(Diagonal, DiagIndex>( + Product(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()), xpr.index())) {} +}; + +// Helper class to perform a matrix product with the destination at hand. +// Depending on the sizes of the factors, there are different evaluation strategies +// as controlled by internal::product_type. +template ::Shape, + typename RhsShape = typename evaluator_traits::Shape, + int ProductType = internal::product_type::value> +struct generic_product_impl; + +template +struct evaluator_assume_aliasing> { + static const bool value = true; +}; + +// This is the default evaluator implementation for products: +// It creates a temporary and call generic_product_impl +template +struct product_evaluator, ProductTag, LhsShape, RhsShape> + : public evaluator::PlainObject> { + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef evaluator Base; + enum { Flags = Base::Flags | EvalBeforeNestingBit }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) + : m_result(xpr.rows(), xpr.cols()) { + internal::construct_at(this, m_result); + + // FIXME shall we handle nested_eval here?, + // if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in + // permutation_matrix_product, transposition_matrix_product, etc.) + // typedef typename internal::nested_eval::type LhsNested; + // typedef typename internal::nested_eval::type RhsNested; + // typedef internal::remove_all_t LhsNestedCleaned; + // typedef internal::remove_all_t RhsNestedCleaned; + // + // const LhsNested lhs(xpr.lhs()); + // const RhsNested rhs(xpr.rhs()); + // + // generic_product_impl::evalTo(m_result, lhs, rhs); + + generic_product_impl::evalTo(m_result, xpr.lhs(), xpr.rhs()); + } + + protected: + PlainObject m_result; +}; + +// The following three shortcuts are enabled only if the scalar types match exactly. +// TODO: we could enable them for different scalar types when the product is not vectorized. 
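+//
+// Illustrative sketch of the statements these three shortcuts intercept (assuming
+// dense matrices of the same scalar type):
+//   C.noalias() = A * B;   // -> generic_product_impl<...>::evalTo
+//   C.noalias() += A * B;  // -> generic_product_impl<...>::addTo
+//   C.noalias() -= A * B;  // -> generic_product_impl<...>::subTo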
+ +// Dense = Product +template +struct Assignment, internal::assign_op, Dense2Dense, + std::enable_if_t<(Options == DefaultProduct || Options == AliasFreeProduct)>> { + typedef Product SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const internal::assign_op&) { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols); + // FIXME shall we handle nested_eval here? + generic_product_impl::evalTo(dst, src.lhs(), src.rhs()); + } +}; + +// Dense += Product +template +struct Assignment, internal::add_assign_op, Dense2Dense, + std::enable_if_t<(Options == DefaultProduct || Options == AliasFreeProduct)>> { + typedef Product SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const internal::add_assign_op&) { + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + // FIXME shall we handle nested_eval here? + generic_product_impl::addTo(dst, src.lhs(), src.rhs()); + } +}; + +// Dense -= Product +template +struct Assignment, internal::sub_assign_op, Dense2Dense, + std::enable_if_t<(Options == DefaultProduct || Options == AliasFreeProduct)>> { + typedef Product SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const internal::sub_assign_op&) { + eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); + // FIXME shall we handle nested_eval here? + generic_product_impl::subTo(dst, src.lhs(), src.rhs()); + } +}; + +// Dense ?= scalar * Product +// TODO we should apply that rule if that's really helpful +// for instance, this is not good for inner products +template +struct Assignment, + const CwiseNullaryOp, Plain>, + const Product>, + AssignFunc, Dense2Dense> { + typedef CwiseBinaryOp, + const CwiseNullaryOp, Plain>, + const Product> + SrcXprType; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const AssignFunc& func) { + call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs()) * src.rhs().rhs(), func); + } +}; + +//---------------------------------------- +// Catch "Dense ?= xpr + Product<>" expression to save one temporary +// FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct + +template +struct evaluator_assume_aliasing< + CwiseBinaryOp< + internal::scalar_sum_op::Scalar>, + const OtherXpr, const Product>, + DenseShape> { + static const bool value = true; +}; + +template +struct evaluator_assume_aliasing< + CwiseBinaryOp< + internal::scalar_difference_op::Scalar>, + const OtherXpr, const Product>, + DenseShape> { + static const bool value = true; +}; + +template +struct assignment_from_xpr_op_product { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, + const InitialFunc& /*func*/) { + call_assignment_no_alias(dst, src.lhs(), Func1()); + call_assignment_no_alias(dst, src.rhs(), Func2()); + } +}; + +#define EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(ASSIGN_OP, BINOP, ASSIGN_OP2) \ + template \ + struct Assignment, const OtherXpr, \ + const Product>, \ + internal::ASSIGN_OP, Dense2Dense> \ + : assignment_from_xpr_op_product, \ + internal::ASSIGN_OP, \ + internal::ASSIGN_OP2> {} + +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_sum_op, add_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op, scalar_sum_op, add_assign_op); 
+EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op, scalar_sum_op, sub_assign_op); + +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_difference_op, sub_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op, scalar_difference_op, sub_assign_op); +EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op, scalar_difference_op, add_assign_op); + +//---------------------------------------- + +template +struct generic_product_impl { + using impl = default_inner_product_impl; + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + dst.coeffRef(0, 0) = impl::run(lhs, rhs); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + dst.coeffRef(0, 0) += impl::run(lhs, rhs); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + dst.coeffRef(0, 0) -= impl::run(lhs, rhs); + } +}; + +/*********************************************************************** + * Implementation of outer dense * dense vector product + ***********************************************************************/ + +// Column major result +template +void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func& func, + const false_type&) { + evaluator rhsEval(rhs); + ei_declare_local_nested_eval(Lhs, lhs, Rhs::SizeAtCompileTime, actual_lhs); + // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored + // FIXME not very good if rhs is real and lhs complex while alpha is real too + const Index cols = dst.cols(); + for (Index j = 0; j < cols; ++j) func(dst.col(j), rhsEval.coeff(Index(0), j) * actual_lhs); +} + +// Row major result +template +void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func& func, + const true_type&) { + evaluator lhsEval(lhs); + ei_declare_local_nested_eval(Rhs, rhs, Lhs::SizeAtCompileTime, actual_rhs); + // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored + // FIXME not very good if lhs is real and rhs complex while alpha is real too + const Index rows = dst.rows(); + for (Index i = 0; i < rows; ++i) func(dst.row(i), lhsEval.coeff(i, Index(0)) * actual_rhs); +} + +template +struct generic_product_impl { + template + struct is_row_major : std::conditional_t<(int(T::Flags) & RowMajorBit), internal::true_type, internal::false_type> {}; + typedef typename Product::Scalar Scalar; + + // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose + struct set { + template + EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { + dst.const_cast_derived() = src; + } + }; + struct add { + template + EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { + dst.const_cast_derived() += src; + } + }; + struct sub { + template + EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { + dst.const_cast_derived() -= src; + } + }; + struct adds { + Scalar m_scale; + explicit adds(const Scalar& s) : m_scale(s) {} + template + void EIGEN_DEVICE_FUNC operator()(const Dst& dst, const Src& src) const { + dst.const_cast_derived() += m_scale * src; + } + }; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major()); + } + + template + static 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, + const Scalar& alpha) { + internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major()); + } +}; + +// This base class provides default implementations for evalTo, addTo, subTo, in terms of scaleAndAddTo +template +struct generic_product_impl_base { + typedef typename Product::Scalar Scalar; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + dst.setZero(); + scaleAndAddTo(dst, lhs, rhs, Scalar(1)); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + scaleAndAddTo(dst, lhs, rhs, Scalar(1)); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, + const Scalar& alpha) { + Derived::scaleAndAddTo(dst, lhs, rhs, alpha); + } +}; + +template +struct generic_product_impl + : generic_product_impl_base> { + typedef typename nested_eval::type LhsNested; + typedef typename nested_eval::type RhsNested; + typedef typename Product::Scalar Scalar; + enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; + typedef internal::remove_all_t> MatrixType; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, + const Scalar& alpha) { + // Fallback to inner product if both the lhs and rhs is a runtime vector. + if (lhs.rows() == 1 && rhs.cols() == 1) { + dst.coeffRef(0, 0) += alpha * lhs.row(0).conjugate().dot(rhs.col(0)); + return; + } + LhsNested actual_lhs(lhs); + RhsNested actual_rhs(rhs); + internal::gemv_dense_selector::HasUsableDirectAccess)>::run(actual_lhs, + actual_rhs, dst, + alpha); + } +}; + +template +struct generic_product_impl { + typedef typename Product::Scalar Scalar; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + // Same as: dst.noalias() = lhs.lazyProduct(rhs); + // but easier on the compiler side + call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + // dst.noalias() += lhs.lazyProduct(rhs); + call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op()); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { + // dst.noalias() -= lhs.lazyProduct(rhs); + call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op()); + } + + // This is a special evaluation path called from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h + // This variant tries to extract scalar multiples from both the LHS and RHS and factor them out. 
For instance:
+  //   dst {,+,-}= (s1*A)*(B*s2)
+  // will be rewritten as:
+  //   dst {,+,-}= (s1*s2) * (A.lazyProduct(B))
+  // There are at least four benefits of doing so:
+  //  1 - huge performance gain for heap-allocated matrix types as it saves costly allocations.
+  //  2 - it is faster than simply by-passing the heap allocation through stack allocation.
+  //  3 - it makes this fallback consistent with the heavy GEMM routine.
+  //  4 - it fully by-passes huge stack allocation attempts when multiplying huge fixed-size matrices.
+  //      (see https://stackoverflow.com/questions/54738495)
+  // For small fixed-size matrices, however, the gains are less obvious: it is sometimes x2 faster, but sometimes x3
+  // slower, and the behavior also depends a lot on the compiler... This is why this re-writing strategy is currently
+  // enabled only when falling back from the main GEMM.
+  template <typename Dst, typename Func>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic(Dst& dst, const Lhs& lhs, const Rhs& rhs,
+                                                                 const Func& func) {
+    enum {
+      HasScalarFactor = blas_traits<Lhs>::HasScalarFactor || blas_traits<Rhs>::HasScalarFactor,
+      ConjLhs = blas_traits<Lhs>::NeedToConjugate,
+      ConjRhs = blas_traits<Rhs>::NeedToConjugate
+    };
+    // FIXME: in c++11 this should be auto, and extractScalarFactor should also return auto
+    // this is important for real*complex_mat
+    Scalar actualAlpha = combine_scalar_factors<Scalar>(lhs, rhs);
+
+    eval_dynamic_impl(dst, blas_traits<Lhs>::extract(lhs).template conjugateIf<ConjLhs>(),
+                      blas_traits<Rhs>::extract(rhs).template conjugateIf<ConjRhs>(), func, actualAlpha,
+                      std::conditional_t<HasScalarFactor, true_type, false_type>());
+  }
+
+ protected:
+  template <typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs,
+                                                                      const Func& func, const Scalar& s /* == 1 */,
+                                                                      false_type) {
+    EIGEN_UNUSED_VARIABLE(s);
+    eigen_internal_assert(numext::is_exactly_one(s));
+    call_restricted_packet_assignment_no_alias(dst, lhs.lazyProduct(rhs), func);
+  }
+
+  template <typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs,
+                                                                      const Func& func, const Scalar& s, true_type) {
+    call_restricted_packet_assignment_no_alias(dst, s * lhs.lazyProduct(rhs), func);
+  }
+};
+
+// This specialization enforces the use of a coefficient-based evaluation strategy
+template <typename Lhs, typename Rhs>
+struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, LazyCoeffBasedProductMode>
+    : generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode> {};
+
+// Case 2: Evaluate coeff by coeff
+//
+// This is mostly taken from CoeffBasedProduct.h
+// The main difference is that we add an extra argument to the etor_product_*_impl::run() function
+// for the inner dimension of the product, because evaluator objects do not know their size.
+
+template <int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct etor_product_coeff_impl;
+
+template <int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct etor_product_packet_impl;
+
+template <typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, DenseShape>
+    : evaluator_base<Product<Lhs, Rhs, LazyProduct>> {
+  typedef Product<Lhs, Rhs, LazyProduct> XprType;
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr)
+      : m_lhs(xpr.lhs()),
+        m_rhs(xpr.rhs()),
+        m_lhsImpl(m_lhs),  // FIXME the creation of the evaluator objects should result in a no-op, but check that!
+        m_rhsImpl(m_rhs),  // Moreover, they are only useful for the packet path, so we could completely disable
+                           // them when not needed, or perhaps declare them on the fly on the packet method... We
+                           // have to experiment to check what's best.
+ m_innerDim(xpr.lhs().cols()) { + EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); + EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::AddCost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); +#if 0 + std::cerr << "LhsOuterStrideBytes= " << LhsOuterStrideBytes << "\n"; + std::cerr << "RhsOuterStrideBytes= " << RhsOuterStrideBytes << "\n"; + std::cerr << "LhsAlignment= " << LhsAlignment << "\n"; + std::cerr << "RhsAlignment= " << RhsAlignment << "\n"; + std::cerr << "CanVectorizeLhs= " << CanVectorizeLhs << "\n"; + std::cerr << "CanVectorizeRhs= " << CanVectorizeRhs << "\n"; + std::cerr << "CanVectorizeInner= " << CanVectorizeInner << "\n"; + std::cerr << "EvalToRowMajor= " << EvalToRowMajor << "\n"; + std::cerr << "Alignment= " << Alignment << "\n"; + std::cerr << "Flags= " << Flags << "\n"; +#endif + } + + // Everything below here is taken from CoeffBasedProduct.h + + typedef typename internal::nested_eval::type LhsNested; + typedef typename internal::nested_eval::type RhsNested; + + typedef internal::remove_all_t LhsNestedCleaned; + typedef internal::remove_all_t RhsNestedCleaned; + + typedef evaluator LhsEtorType; + typedef evaluator RhsEtorType; + + enum { + RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime, + ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime, + InnerSize = min_size_prefer_fixed(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime), + MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime, + MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime + }; + + typedef typename find_best_packet::type LhsVecPacketType; + typedef typename find_best_packet::type RhsVecPacketType; + + enum { + + LhsCoeffReadCost = LhsEtorType::CoeffReadCost, + RhsCoeffReadCost = RhsEtorType::CoeffReadCost, + CoeffReadCost = InnerSize == 0 ? NumTraits::ReadCost + : InnerSize == Dynamic + ? HugeCost + : InnerSize * (NumTraits::MulCost + int(LhsCoeffReadCost) + int(RhsCoeffReadCost)) + + (InnerSize - 1) * NumTraits::AddCost, + + Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT, + + LhsFlags = LhsEtorType::Flags, + RhsFlags = RhsEtorType::Flags, + + LhsRowMajor = LhsFlags & RowMajorBit, + RhsRowMajor = RhsFlags & RowMajorBit, + + LhsVecPacketSize = unpacket_traits::size, + RhsVecPacketSize = unpacket_traits::size, + + // Here, we don't care about alignment larger than the usable packet size. + LhsAlignment = + plain_enum_min(LhsEtorType::Alignment, LhsVecPacketSize* int(sizeof(typename LhsNestedCleaned::Scalar))), + RhsAlignment = + plain_enum_min(RhsEtorType::Alignment, RhsVecPacketSize* int(sizeof(typename RhsNestedCleaned::Scalar))), + + SameType = is_same::value, + + CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime != 1), + CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime != 1), + + EvalToRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? 1 + : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) + ? 0 + : (bool(RhsRowMajor) && !CanVectorizeLhs), + + Flags = ((int(LhsFlags) | int(RhsFlags)) & HereditaryBits & ~RowMajorBit) | + (EvalToRowMajor ? RowMajorBit : 0) + // TODO enable vectorization for mixed types + | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0) | + (XprType::IsVectorAtCompileTime ? 
LinearAccessBit : 0), + + LhsOuterStrideBytes = + int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)), + RhsOuterStrideBytes = + int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)), + + Alignment = bool(CanVectorizeLhs) + ? (LhsOuterStrideBytes <= 0 || (int(LhsOuterStrideBytes) % plain_enum_max(1, LhsAlignment)) != 0 + ? 0 + : LhsAlignment) + : bool(CanVectorizeRhs) + ? (RhsOuterStrideBytes <= 0 || (int(RhsOuterStrideBytes) % plain_enum_max(1, RhsAlignment)) != 0 + ? 0 + : RhsAlignment) + : 0, + + /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside + * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner + * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect + * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI. + */ + CanVectorizeInner = SameType && LhsRowMajor && (!RhsRowMajor) && + (int(LhsFlags) & int(RhsFlags) & ActualPacketAccessBit) && + (int(InnerSize) % packet_traits::size == 0) + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const { + return (m_lhs.row(row).transpose().cwiseProduct(m_rhs.col(col))).sum(); + } + + /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, + * which is why we don't set the LinearAccessBit. + * TODO: this seems possible when the result is a vector + */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index index) const { + const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? 0 : index; + const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? index : 0; + return (m_lhs.row(row).transpose().cwiseProduct(m_rhs.col(col))).sum(); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packet(Index row, Index col) const { + PacketType res; + typedef etor_product_packet_impl + PacketImpl; + PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); + return res; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packet(Index index) const { + const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? 0 : index; + const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime == 1) ? 
index : 0; + return packet(row, col); + } + + protected: + add_const_on_value_type_t m_lhs; + add_const_on_value_type_t m_rhs; + + LhsEtorType m_lhsImpl; + RhsEtorType m_rhsImpl; + + // TODO: Get rid of m_innerDim if known at compile time + Index m_innerDim; +}; + +template +struct product_evaluator, LazyCoeffBasedProductMode, DenseShape, DenseShape> + : product_evaluator, CoeffBasedProductMode, DenseShape, DenseShape> { + typedef Product XprType; + typedef Product BaseProduct; + typedef product_evaluator Base; + enum { Flags = Base::Flags | EvalBeforeNestingBit }; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) + : Base(BaseProduct(xpr.lhs(), xpr.rhs())) {} +}; + +/**************************************** +*** Coeff based product, Packet path *** +****************************************/ + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, + Index innerDim, Packet& res) { + etor_product_packet_impl::run(row, col, lhs, rhs, + innerDim, res); + res = pmadd(pset1(lhs.coeff(row, Index(UnrollingIndex - 1))), + rhs.template packet(Index(UnrollingIndex - 1), col), res); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, + Index innerDim, Packet& res) { + etor_product_packet_impl::run(row, col, lhs, rhs, + innerDim, res); + res = pmadd(lhs.template packet(row, Index(UnrollingIndex - 1)), + pset1(rhs.coeff(Index(UnrollingIndex - 1), col)), res); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, + Index /*innerDim*/, Packet& res) { + res = pmul(pset1(lhs.coeff(row, Index(0))), rhs.template packet(Index(0), col)); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, + Index /*innerDim*/, Packet& res) { + res = pmul(lhs.template packet(row, Index(0)), pset1(rhs.coeff(Index(0), col))); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, + const Rhs& /*rhs*/, Index /*innerDim*/, Packet& res) { + res = pset1(typename unpacket_traits::type(0)); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, + const Rhs& /*rhs*/, Index /*innerDim*/, Packet& res) { + res = pset1(typename unpacket_traits::type(0)); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, + Index innerDim, Packet& res) { + res = pset1(typename unpacket_traits::type(0)); + for (Index i = 0; i < innerDim; ++i) + res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); + } +}; + +template +struct etor_product_packet_impl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, + Index innerDim, Packet& res) { + res = pset1(typename unpacket_traits::type(0)); + for (Index i = 0; i < innerDim; ++i) + res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); + } +}; + +/*************************************************************************** + * Triangular 
products + ***************************************************************************/ +template +struct triangular_product_impl; + +template +struct generic_product_impl + : generic_product_impl_base> { + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { + triangular_product_impl::run( + dst, lhs.nestedExpression(), rhs, alpha); + } +}; + +template +struct generic_product_impl + : generic_product_impl_base> { + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { + triangular_product_impl::run( + dst, lhs, rhs.nestedExpression(), alpha); + } +}; + +/*************************************************************************** + * SelfAdjoint products + ***************************************************************************/ +template +struct selfadjoint_product_impl; + +template +struct generic_product_impl + : generic_product_impl_base> { + typedef typename Product::Scalar Scalar; + + template + static EIGEN_DEVICE_FUNC void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { + selfadjoint_product_impl::run( + dst, lhs.nestedExpression(), rhs, alpha); + } +}; + +template +struct generic_product_impl + : generic_product_impl_base> { + typedef typename Product::Scalar Scalar; + + template + static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { + selfadjoint_product_impl::run( + dst, lhs, rhs.nestedExpression(), alpha); + } +}; + +/*************************************************************************** + * Diagonal products + ***************************************************************************/ + +template +struct diagonal_product_evaluator_base : evaluator_base { + typedef typename ScalarBinaryOpTraits::ReturnType Scalar; + + public: + enum { + CoeffReadCost = int(NumTraits::MulCost) + int(evaluator::CoeffReadCost) + + int(evaluator::CoeffReadCost), + + MatrixFlags = evaluator::Flags, + DiagFlags = evaluator::Flags, + + StorageOrder_ = (Derived::MaxRowsAtCompileTime == 1 && Derived::MaxColsAtCompileTime != 1) ? RowMajor + : (Derived::MaxColsAtCompileTime == 1 && Derived::MaxRowsAtCompileTime != 1) ? ColMajor + : MatrixFlags & RowMajorBit ? RowMajor + : ColMajor, + SameStorageOrder_ = int(StorageOrder_) == ((MatrixFlags & RowMajorBit) ? RowMajor : ColMajor), + + ScalarAccessOnDiag_ = !((int(StorageOrder_) == ColMajor && int(ProductOrder) == OnTheLeft) || + (int(StorageOrder_) == RowMajor && int(ProductOrder) == OnTheRight)), + SameTypes_ = is_same::value, + // FIXME currently we need same types, but in the future the next rule should be the one + // Vectorizable_ = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (SameTypes_ && + // bool(int(DiagFlags)&PacketAccessBit))), + Vectorizable_ = bool(int(MatrixFlags) & PacketAccessBit) && SameTypes_ && + (SameStorageOrder_ || (MatrixFlags & LinearAccessBit) == LinearAccessBit) && + (ScalarAccessOnDiag_ || (bool(int(DiagFlags) & PacketAccessBit))), + LinearAccessMask_ = + (MatrixType::RowsAtCompileTime == 1 || MatrixType::ColsAtCompileTime == 1) ? LinearAccessBit : 0, + Flags = + ((HereditaryBits | LinearAccessMask_) & (unsigned int)(MatrixFlags)) | (Vectorizable_ ? 
PacketAccessBit : 0), + Alignment = evaluator::Alignment, + + AsScalarProduct = + (DiagonalType::SizeAtCompileTime == 1) || + (DiagonalType::SizeAtCompileTime == Dynamic && MatrixType::RowsAtCompileTime == 1 && + ProductOrder == OnTheLeft) || + (DiagonalType::SizeAtCompileTime == Dynamic && MatrixType::ColsAtCompileTime == 1 && ProductOrder == OnTheRight) + }; + + EIGEN_DEVICE_FUNC diagonal_product_evaluator_base(const MatrixType& mat, const DiagonalType& diag) + : m_diagImpl(diag), m_matImpl(mat) { + EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const { + if (AsScalarProduct) + return m_diagImpl.coeff(0) * m_matImpl.coeff(idx); + else + return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx); + } + + protected: + template + EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const { + return internal::pmul(m_matImpl.template packet(row, col), + internal::pset1(m_diagImpl.coeff(id))); + } + + template + EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const { + enum { + InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, + DiagonalPacketLoadMode = plain_enum_min( + LoadMode, + ((InnerSize % 16) == 0) ? int(Aligned16) : int(evaluator::Alignment)) // FIXME hardcoded 16!! + }; + return internal::pmul(m_matImpl.template packet(row, col), + m_diagImpl.template packet(id)); + } + + evaluator m_diagImpl; + evaluator m_matImpl; +}; + +// diagonal * dense +template +struct product_evaluator, ProductTag, DiagonalShape, DenseShape> + : diagonal_product_evaluator_base, + OnTheLeft> { + typedef diagonal_product_evaluator_base, + OnTheLeft> + Base; + using Base::coeff; + using Base::m_diagImpl; + using Base::m_matImpl; + typedef typename Base::Scalar Scalar; + + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + typedef typename Lhs::DiagonalVectorType DiagonalType; + + enum { StorageOrder = Base::StorageOrder_ }; + + EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { + return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col); + } + +#ifndef EIGEN_GPUCC + template + EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case. + // See also similar calls below. + return this->template packet_impl( + row, col, row, std::conditional_t()); + } + + template + EIGEN_STRONG_INLINE PacketType packet(Index idx) const { + return packet(int(StorageOrder) == ColMajor ? idx : 0, + int(StorageOrder) == ColMajor ? 
0 : idx); + } +#endif +}; + +// dense * diagonal +template +struct product_evaluator, ProductTag, DenseShape, DiagonalShape> + : diagonal_product_evaluator_base, + OnTheRight> { + typedef diagonal_product_evaluator_base, + OnTheRight> + Base; + using Base::coeff; + using Base::m_diagImpl; + using Base::m_matImpl; + typedef typename Base::Scalar Scalar; + + typedef Product XprType; + typedef typename XprType::PlainObject PlainObject; + + enum { StorageOrder = Base::StorageOrder_ }; + + EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal()) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { + return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col); + } + +#ifndef EIGEN_GPUCC + template + EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { + return this->template packet_impl( + row, col, col, std::conditional_t()); + } + + template + EIGEN_STRONG_INLINE PacketType packet(Index idx) const { + return packet(int(StorageOrder) == ColMajor ? idx : 0, + int(StorageOrder) == ColMajor ? 0 : idx); + } +#endif +}; + +/*************************************************************************** + * Products with permutation matrices + ***************************************************************************/ + +/** \internal + * \class permutation_matrix_product + * Internal helper class implementing the product between a permutation matrix and a matrix. + * This class is specialized for DenseShape below and for SparseShape in SparseCore/SparsePermutation.h + */ +template +struct permutation_matrix_product; + +template +struct permutation_matrix_product { + typedef typename nested_eval::type MatrixType; + typedef remove_all_t MatrixTypeCleaned; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const PermutationType& perm, + const ExpressionType& xpr) { + MatrixType mat(xpr); + const Index n = Side == OnTheLeft ? mat.rows() : mat.cols(); + // FIXME we need an is_same for expression that is not sensitive to constness. For instance + // is_same_xpr, Block >::value should be true. + // if(is_same::value && extract_data(dst) == extract_data(mat)) + if (is_same_dense(dst, mat)) { + // apply the permutation inplace + Matrix mask(perm.size()); + mask.fill(false); + Index r = 0; + while (r < perm.size()) { + // search for the next seed + while (r < perm.size() && mask[r]) r++; + if (r >= perm.size()) break; + // we got one, let's follow it until we are back to the seed + Index k0 = r++; + Index kPrev = k0; + mask.coeffRef(k0) = true; + for (Index k = perm.indices().coeff(k0); k != k0; k = perm.indices().coeff(k)) { + Block(dst, k) + .swap(Block < Dest, Side == OnTheLeft ? 1 : Dest::RowsAtCompileTime, + Side == OnTheRight + ? 1 + : Dest::ColsAtCompileTime > (dst, ((Side == OnTheLeft) ^ Transposed) ? k0 : kPrev)); + + mask.coeffRef(k) = true; + kPrev = k; + } + } + } else { + for (Index i = 0; i < n; ++i) { + Block( + dst, ((Side == OnTheLeft) ^ Transposed) ? perm.indices().coeff(i) : i) + + = + + Block < const MatrixTypeCleaned, + Side == OnTheLeft ? 1 : MatrixTypeCleaned::RowsAtCompileTime, + Side == OnTheRight ? 1 + : MatrixTypeCleaned::ColsAtCompileTime > + (mat, ((Side == OnTheRight) ^ Transposed) ? 
perm.indices().coeff(i) : i); + } + } + } +}; + +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + permutation_matrix_product::run(dst, lhs, rhs); + } +}; + +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + permutation_matrix_product::run(dst, rhs, lhs); + } +}; + +template +struct generic_product_impl, Rhs, PermutationShape, MatrixShape, ProductTag> { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Inverse& lhs, const Rhs& rhs) { + permutation_matrix_product::run(dst, lhs.nestedExpression(), rhs); + } +}; + +template +struct generic_product_impl, MatrixShape, PermutationShape, ProductTag> { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Inverse& rhs) { + permutation_matrix_product::run(dst, rhs.nestedExpression(), lhs); + } +}; + +/*************************************************************************** + * Products with transpositions matrices + ***************************************************************************/ + +// FIXME could we unify Transpositions and Permutation into a single "shape"?? + +/** \internal + * \class transposition_matrix_product + * Internal helper class implementing the product between a permutation matrix and a matrix. + */ +template +struct transposition_matrix_product { + typedef typename nested_eval::type MatrixType; + typedef remove_all_t MatrixTypeCleaned; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const TranspositionType& tr, + const ExpressionType& xpr) { + MatrixType mat(xpr); + typedef typename TranspositionType::StorageIndex StorageIndex; + const Index size = tr.size(); + StorageIndex j = 0; + + if (!is_same_dense(dst, mat)) dst = mat; + + for (Index k = (Transposed ? size - 1 : 0); Transposed ? k >= 0 : k < size; Transposed ? 
--k : ++k) + if (Index(j = tr.coeff(k)) != k) { + if (Side == OnTheLeft) + dst.row(k).swap(dst.row(j)); + else if (Side == OnTheRight) + dst.col(k).swap(dst.col(j)); + } + } +}; + +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + transposition_matrix_product::run(dst, lhs, rhs); + } +}; + +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + transposition_matrix_product::run(dst, rhs, lhs); + } +}; + +template +struct generic_product_impl, Rhs, TranspositionsShape, MatrixShape, ProductTag> { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Transpose& lhs, const Rhs& rhs) { + transposition_matrix_product::run(dst, lhs.nestedExpression(), rhs); + } +}; + +template +struct generic_product_impl, MatrixShape, TranspositionsShape, ProductTag> { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Transpose& rhs) { + transposition_matrix_product::run(dst, rhs.nestedExpression(), lhs); + } +}; + +/*************************************************************************** + * skew symmetric products + * for now we just call the generic implementation + ***************************************************************************/ +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + generic_product_impl::evalTo(dst, lhs, + rhs); + } +}; + +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + generic_product_impl::evalTo(dst, lhs, + rhs); + } +}; + +template +struct generic_product_impl { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { + generic_product_impl::evalTo(dst, lhs, rhs); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_EVALUATORS_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Random.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Random.h new file mode 100644 index 0000000000000000000000000000000000000000..f8a543562500df7137bcc6fc3d82ebe4eebf9873 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Random.h @@ -0,0 +1,207 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RANDOM_H +#define EIGEN_RANDOM_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct scalar_random_op { + inline const Scalar operator()() const { return random(); } +}; + +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false, IsRepeatable = false }; +}; + +} // end namespace internal + +/** \returns a random matrix expression + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. 
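+ * + * A minimal illustrative sketch of both behaviors (added for exposition, not part of the upstream docs): + * \code + * Eigen::MatrixXd md = Eigen::MatrixXd::Random(2, 3); // each coefficient drawn from [-1, 1] + * Eigen::MatrixXi mi = Eigen::MatrixXi::Random(2, 3); // each coefficient spans the whole int range + * \endcode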
+ * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * \not_reentrant + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used + * instead. + * + * + * Example: \include MatrixBase_random_int_int.cpp + * Output: \verbinclude MatrixBase_random_int_int.out + * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * + * See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators. + * + * \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random() + */ +template +inline const typename DenseBase::RandomReturnType DenseBase::Random(Index rows, Index cols) { + return NullaryExpr(rows, cols, internal::scalar_random_op()); +} + +/** \returns a random vector expression + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * \not_reentrant + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Random() should be used + * instead. + * + * Example: \include MatrixBase_random_int.cpp + * Output: \verbinclude MatrixBase_random_int.out + * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary vector whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * + * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random() + */ +template +inline const typename DenseBase::RandomReturnType DenseBase::Random(Index size) { + return NullaryExpr(size, internal::scalar_random_op()); +} + +/** \returns a fixed-size random matrix or vector expression + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_random.cpp + * Output: \verbinclude MatrixBase_random.out + * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * + * \not_reentrant + * + * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index) + */ +template +inline const typename DenseBase::RandomReturnType DenseBase::Random() { + return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op()); +} + +/** Sets all coefficients in this expression to random values. + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. 
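+ * + * A short illustrative sketch (hypothetical usage, mirroring the range rules above): + * \code + * Eigen::Matrix3f m; + * m.setRandom(); // overwrites every coefficient with a value in [-1, 1] + * \endcode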
+ * + * \not_reentrant + * + * Example: \include MatrixBase_setRandom.cpp + * Output: \verbinclude MatrixBase_setRandom.out + * + * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index) + */ +template +EIGEN_DEVICE_FUNC inline Derived& DenseBase::setRandom() { + return *this = Random(rows(), cols()); +} + +/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values. + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. + * + * \only_for_vectors + * \not_reentrant + * + * Example: \include Matrix_setRandom_int.cpp + * Output: \verbinclude Matrix_setRandom_int.out + * + * \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random() + */ +template +EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(Index newSize) { + resize(newSize); + return setRandom(); +} + +/** Resizes to the given size, and sets all coefficients in this expression to random values. + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. + * + * \not_reentrant + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setRandom_int_int.cpp + * Output: \verbinclude Matrix_setRandom_int_int.out + * + * \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random() + */ +template +EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(Index rows, Index cols) { + resize(rows, cols); + return setRandom(); +} + +/** Resizes to the given size, changing only the number of columns, and sets all + * coefficients in this expression to random values. For the parameter of type + * NoChange_t, just pass the special value \c NoChange. + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. + * + * \not_reentrant + * + * \sa DenseBase::setRandom(), setRandom(Index), setRandom(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Random() + */ +template +EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(NoChange_t, Index cols) { + return setRandom(rows(), cols); +} + +/** Resizes to the given size, changing only the number of rows, and sets all + * coefficients in this expression to random values. For the parameter of type + * NoChange_t, just pass the special value \c NoChange. + * + * Numbers are uniformly spread through their whole definition range for integer types, + * and in the [-1:1] range for floating point scalar types. + * + * \not_reentrant + * + * \sa DenseBase::setRandom(), setRandom(Index), setRandom(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Random() + */ +template +EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(Index rows, NoChange_t) { + return setRandom(rows, cols()); +} + +} // end namespace Eigen + +#endif // EIGEN_RANDOM_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/RandomImpl.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/RandomImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..76e43f5dc5f655d04df0257e88470ab5ce985f09 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/RandomImpl.h @@ -0,0 +1,255 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2024 Charles Schlosser +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RANDOM_IMPL_H +#define EIGEN_RANDOM_IMPL_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +/**************************************************************************** + * Implementation of random * + ****************************************************************************/ + +template +struct random_default_impl {}; + +template +struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; + +template +struct random_retval { + typedef Scalar type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) { + return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); +} + +template +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() { + return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); +} + +// TODO: replace or provide alternatives to this, e.g. std::random_device +struct eigen_random_device { + using ReturnType = int; + static constexpr int Entropy = meta_floor_log2<(unsigned int)(RAND_MAX) + 1>::value; + static constexpr ReturnType Highest = RAND_MAX; + static EIGEN_DEVICE_FUNC inline ReturnType run() { return std::rand(); } +}; + +// Fill a built-in unsigned integer with numRandomBits beginning with the least significant bit +template +struct random_bits_impl { + EIGEN_STATIC_ASSERT(std::is_unsigned::value, SCALAR MUST BE A BUILT - IN UNSIGNED INTEGER) + using RandomDevice = eigen_random_device; + using RandomReturnType = typename RandomDevice::ReturnType; + static constexpr int kEntropy = RandomDevice::Entropy; + static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT; + // return a Scalar filled with numRandomBits beginning from the least significant bit + static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { + eigen_assert((numRandomBits >= 0) && (numRandomBits <= kTotalBits)); + const Scalar mask = Scalar(-1) >> ((kTotalBits - numRandomBits) & (kTotalBits - 1)); + Scalar randomBits = 0; + for (int shift = 0; shift < numRandomBits; shift += kEntropy) { + RandomReturnType r = RandomDevice::run(); + randomBits |= static_cast(r) << shift; + } + // clear the excess bits + randomBits &= mask; + return randomBits; + } +}; + +template +EIGEN_DEVICE_FUNC inline BitsType getRandomBits(int numRandomBits) { + return random_bits_impl::run(numRandomBits); +} + +// random implementation for a built-in floating point type +template ::value> +struct random_float_impl { + using BitsType = typename numext::get_integer_by_size::unsigned_type; + static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() { + const int digits = NumTraits::digits(); + return digits - 1; + } + static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { + eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits()); + BitsType randomBits = getRandomBits(numRandomBits); + // if fewer than MantissaBits is requested, shift them to the left + randomBits <<= (mantissaBits() - numRandomBits); + // randomBits is in the half-open interval [2,4) + randomBits |= numext::bit_cast(Scalar(2)); + // result is in the half-open interval [-1,1) + Scalar result = numext::bit_cast(randomBits) - Scalar(3); + return result; + } +}; +// random implementation for a custom floating point type +// uses double as the implementation 
with a mantissa with a size equal to either the target scalar's mantissa or that of +// double, whichever is smaller +template +struct random_float_impl { + static EIGEN_DEVICE_FUNC inline int mantissaBits() { + const int digits = NumTraits::digits(); + constexpr int kDoubleDigits = NumTraits::digits(); + return numext::mini(digits, kDoubleDigits) - 1; + } + static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { + eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits()); + Scalar result = static_cast(random_float_impl::run(numRandomBits)); + return result; + } +}; + +#if !EIGEN_COMP_NVCC +// random implementation for long double +// this specialization is not compatible with double-double scalars +template ::digits != (2 * std::numeric_limits::digits)))> +struct random_longdouble_impl { + static constexpr int Size = sizeof(long double); + static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() { return NumTraits::digits() - 1; } + static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) { + eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits()); + EIGEN_USING_STD(memcpy); + int numLowBits = numext::mini(numRandomBits, 64); + int numHighBits = numext::maxi(numRandomBits - 64, 0); + uint64_t randomBits[2]; + long double result = 2.0L; + memcpy(&randomBits, &result, Size); + randomBits[0] |= getRandomBits(numLowBits); + randomBits[1] |= getRandomBits(numHighBits); + memcpy(&result, &randomBits, Size); + result -= 3.0L; + return result; + } +}; +template <> +struct random_longdouble_impl { + static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() { return NumTraits::digits() - 1; } + static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) { + return static_cast(random_float_impl::run(numRandomBits)); + } +}; +template <> +struct random_float_impl : random_longdouble_impl<> {}; +#endif + +template +struct random_default_impl { + using Impl = random_float_impl; + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) { + Scalar half_x = Scalar(0.5) * x; + Scalar half_y = Scalar(0.5) * y; + Scalar result = (half_x + half_y) + (half_y - half_x) * run(numRandomBits); + // result is in the half-open interval [x, y) -- provided that x < y + return result; + } + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) { + return run(x, y, Impl::mantissaBits()); + } + static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { return Impl::run(numRandomBits); } + static EIGEN_DEVICE_FUNC inline Scalar run() { return run(Impl::mantissaBits()); } +}; + +template ::IsSigned, bool BuiltIn = std::is_integral::value> +struct random_int_impl; + +// random implementation for a built-in unsigned integer type +template +struct random_int_impl { + static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT; + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) { + if (y <= x) return x; + Scalar range = y - x; + // handle edge case where [x,y] spans the entire range of Scalar + if (range == NumTraits::highest()) return run(); + Scalar count = range + 1; + // calculate the number of random bits needed to fill range + int numRandomBits = log2_ceil(count); + Scalar randomBits; + do { + randomBits = getRandomBits(numRandomBits); + // if the random draw is outside [0, range), try again (rejection sampling) + // in the worst-case scenario, the probability of rejection is: 1/2 - 1/2^numRandomBits < 50% + } while (randomBits >= count); + Scalar result = x + randomBits; + 
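// Illustrative walk-through (expository comment, not upstream): for x = 10, y = 13 we get + // range = 3, count = 4, numRandomBits = 2, so randomBits is drawn uniformly from {0,1,2,3} + // with no rejection and result is uniform over {10,11,12,13}; for count = 3 the draw 3 + // would be rejected and redrawn. +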
return result; + } + static EIGEN_DEVICE_FUNC inline Scalar run() { return getRandomBits(kTotalBits); } +}; + +// random implementation for a built-in signed integer type +template +struct random_int_impl { + static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT; + using BitsType = typename make_unsigned::type; + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) { + if (y <= x) return x; + // Avoid overflow by representing `range` as an unsigned type + BitsType range = static_cast(y) - static_cast(x); + BitsType randomBits = random_int_impl::run(0, range); + // Avoid overflow in the case where `x` is negative and there is a large range so + // `randomBits` would also be negative if cast to `Scalar` first. + Scalar result = static_cast(static_cast(x) + randomBits); + return result; + } + static EIGEN_DEVICE_FUNC inline Scalar run() { return static_cast(getRandomBits(kTotalBits)); } +}; + +// todo: custom integers +template +struct random_int_impl { + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar&, const Scalar&) { return run(); } + static EIGEN_DEVICE_FUNC inline Scalar run() { + eigen_assert(std::false_type::value && "RANDOM FOR CUSTOM INTEGERS NOT YET SUPPORTED"); + return Scalar(0); + } +}; + +template +struct random_default_impl : random_int_impl {}; + +template <> +struct random_impl { + static EIGEN_DEVICE_FUNC inline bool run(const bool& x, const bool& y) { + if (y <= x) return x; + return run(); + } + static EIGEN_DEVICE_FUNC inline bool run() { return getRandomBits(1) ? true : false; } +}; + +template +struct random_default_impl { + typedef typename NumTraits::Real RealScalar; + using Impl = random_impl; + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) { + return Scalar(Impl::run(x.real(), y.real(), numRandomBits), Impl::run(x.imag(), y.imag(), numRandomBits)); + } + static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) { + return Scalar(Impl::run(x.real(), y.real()), Impl::run(x.imag(), y.imag())); + } + static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { + return Scalar(Impl::run(numRandomBits), Impl::run(numRandomBits)); + } + static EIGEN_DEVICE_FUNC inline Scalar run() { return Scalar(Impl::run(), Impl::run()); } +}; + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_RANDOM_IMPL_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Redux.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Redux.h new file mode 100644 index 0000000000000000000000000000000000000000..0c5f2d9f6b6859bb8cfa4c23e8ee96f3fb957df9 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Redux.h @@ -0,0 +1,528 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_REDUX_H +#define EIGEN_REDUX_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +// TODO +// * implement other kind of vectorization +// * factorize code + +/*************************************************************************** + * Part 1 : the logic deciding a strategy for vectorization and unrolling + ***************************************************************************/ + +template +struct redux_traits { + public: + typedef typename find_best_packet::type PacketType; + enum { + PacketSize = unpacket_traits::size, + InnerMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxColsAtCompileTime : Evaluator::MaxRowsAtCompileTime, + OuterMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxRowsAtCompileTime : Evaluator::MaxColsAtCompileTime, + SliceVectorizedWork = int(InnerMaxSize) == Dynamic ? Dynamic + : int(OuterMaxSize) == Dynamic ? (int(InnerMaxSize) >= int(PacketSize) ? Dynamic : 0) + : (int(InnerMaxSize) / int(PacketSize)) * int(OuterMaxSize) + }; + + enum { + MayLinearize = (int(Evaluator::Flags) & LinearAccessBit), + MightVectorize = (int(Evaluator::Flags) & ActualPacketAccessBit) && (functor_traits::PacketAccess), + MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize), + MaySliceVectorize = bool(MightVectorize) && (int(SliceVectorizedWork) == Dynamic || int(SliceVectorizedWork) >= 3) + }; + + public: + enum { + Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal) + : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) + : int(MayLinearize) ? int(LinearTraversal) + : int(DefaultTraversal) + }; + + public: + enum { + Cost = Evaluator::SizeAtCompileTime == Dynamic + ? HugeCost + : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) + + (Evaluator::SizeAtCompileTime - 1) * functor_traits::Cost, + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize)) + }; + + public: + enum { Unrolling = Cost <= UnrollingLimit ? 
CompleteUnrolling : NoUnrolling }; + +#ifdef EIGEN_DEBUG_ASSIGN + static void debug() { + std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl; + std::cerr.setf(std::ios::hex, std::ios::basefield); + EIGEN_DEBUG_VAR(Evaluator::Flags) + std::cerr.unsetf(std::ios::hex); + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(OuterMaxSize) + EIGEN_DEBUG_VAR(SliceVectorizedWork) + EIGEN_DEBUG_VAR(PacketSize) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + std::cerr << "Traversal" + << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; + EIGEN_DEBUG_VAR(UnrollingLimit) + std::cerr << "Unrolling" + << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl; + std::cerr << std::endl; + } +#endif +}; + +/*************************************************************************** + * Part 2 : unrollers + ***************************************************************************/ + +/*** no vectorization ***/ + +template +struct redux_novec_unroller { + static constexpr Index HalfLength = Length / 2; + + typedef typename Evaluator::Scalar Scalar; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) { + return func(redux_novec_unroller::run(eval, func), + redux_novec_unroller::run(eval, func)); + } +}; + +template +struct redux_novec_unroller { + static constexpr Index outer = Start / Evaluator::InnerSizeAtCompileTime; + static constexpr Index inner = Start % Evaluator::InnerSizeAtCompileTime; + + typedef typename Evaluator::Scalar Scalar; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) { + return eval.coeffByOuterInner(outer, inner); + } +}; + +// This is actually dead code and will never be called. It is required +// to prevent false warnings regarding failed inlining though +// for 0 length run() will never be called at all. +template +struct redux_novec_unroller { + typedef typename Evaluator::Scalar Scalar; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); } +}; + +template +struct redux_novec_linear_unroller { + static constexpr Index HalfLength = Length / 2; + + typedef typename Evaluator::Scalar Scalar; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) { + return func(redux_novec_linear_unroller::run(eval, func), + redux_novec_linear_unroller::run(eval, func)); + } +}; + +template +struct redux_novec_linear_unroller { + typedef typename Evaluator::Scalar Scalar; + + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) { + return eval.coeff(Start); + } +}; + +// This is actually dead code and will never be called. It is required +// to prevent false warnings regarding failed inlining though +// for 0 length run() will never be called at all. 
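+// (Expository note, not upstream: the unrollers above reduce by recursive halving, so for +// Length == 4 the reduction tree is func(func(coeff(0), coeff(1)), func(coeff(2), coeff(3))), +// which keeps the dependency chain logarithmic rather than linear as in a left fold.)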
+template +struct redux_novec_linear_unroller { + typedef typename Evaluator::Scalar Scalar; + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); } +}; + +/*** vectorization ***/ + +template +struct redux_vec_unroller { + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) { + constexpr Index HalfLength = Length / 2; + + return func.packetOp( + redux_vec_unroller::template run(eval, func), + redux_vec_unroller::template run(eval, + func)); + } +}; + +template +struct redux_vec_unroller { + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) { + constexpr Index PacketSize = unpacket_traits::size; + constexpr Index index = Start * PacketSize; + constexpr Index outer = index / int(Evaluator::InnerSizeAtCompileTime); + constexpr Index inner = index % int(Evaluator::InnerSizeAtCompileTime); + constexpr int alignment = Evaluator::Alignment; + + return eval.template packetByOuterInner(outer, inner); + } +}; + +template +struct redux_vec_linear_unroller { + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) { + constexpr Index HalfLength = Length / 2; + + return func.packetOp( + redux_vec_linear_unroller::template run(eval, func), + redux_vec_linear_unroller::template run( + eval, func)); + } +}; + +template +struct redux_vec_linear_unroller { + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) { + constexpr Index PacketSize = unpacket_traits::size; + constexpr Index index = (Start * PacketSize); + constexpr int alignment = Evaluator::Alignment; + return eval.template packet(index); + } +}; + +/*************************************************************************** + * Part 3 : implementation of all cases + ***************************************************************************/ + +template ::Traversal, + int Unrolling = redux_traits::Unrolling> +struct redux_impl; + +template +struct redux_impl { + typedef typename Evaluator::Scalar Scalar; + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) { + eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix"); + Scalar res = eval.coeffByOuterInner(0, 0); + for (Index i = 1; i < xpr.innerSize(); ++i) res = func(res, eval.coeffByOuterInner(0, i)); + for (Index i = 1; i < xpr.outerSize(); ++i) + for (Index j = 0; j < xpr.innerSize(); ++j) res = func(res, eval.coeffByOuterInner(i, j)); + return res; + } +}; + +template +struct redux_impl { + typedef typename Evaluator::Scalar Scalar; + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) { + eigen_assert(xpr.size() > 0 && "you are using an empty matrix"); + Scalar res = eval.coeff(0); + for (Index k = 1; k < xpr.size(); ++k) res = func(res, eval.coeff(k)); + return res; + } +}; + +template +struct redux_impl + : redux_novec_unroller { + typedef redux_novec_unroller Base; + typedef typename Evaluator::Scalar Scalar; + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, + const XprType& /*xpr*/) { + return Base::run(eval, func); + } +}; + +template +struct redux_impl + : redux_novec_linear_unroller { + typedef redux_novec_linear_unroller Base; + typedef typename Evaluator::Scalar 
Scalar; + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, + const XprType& /*xpr*/) { + return Base::run(eval, func); + } +}; + +template +struct redux_impl { + typedef typename Evaluator::Scalar Scalar; + typedef typename redux_traits::PacketType PacketScalar; + + template + static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) { + const Index size = xpr.size(); + + constexpr Index packetSize = redux_traits::PacketSize; + constexpr int packetAlignment = unpacket_traits::alignment; + constexpr int alignment0 = + (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits::AlignedOnScalar)) + ? int(packetAlignment) + : int(Unaligned); + constexpr int alignment = plain_enum_max(alignment0, Evaluator::Alignment); + const Index alignedStart = internal::first_default_aligned(xpr); + const Index alignedSize2 = ((size - alignedStart) / (2 * packetSize)) * (2 * packetSize); + const Index alignedSize = ((size - alignedStart) / (packetSize)) * (packetSize); + const Index alignedEnd2 = alignedStart + alignedSize2; + const Index alignedEnd = alignedStart + alignedSize; + Scalar res; + if (alignedSize) { + PacketScalar packet_res0 = eval.template packet(alignedStart); + if (alignedSize > packetSize) // we have at least two packets to partly unroll the loop + { + PacketScalar packet_res1 = eval.template packet(alignedStart + packetSize); + for (Index index = alignedStart + 2 * packetSize; index < alignedEnd2; index += 2 * packetSize) { + packet_res0 = func.packetOp(packet_res0, eval.template packet(index)); + packet_res1 = func.packetOp(packet_res1, eval.template packet(index + packetSize)); + } + + packet_res0 = func.packetOp(packet_res0, packet_res1); + if (alignedEnd > alignedEnd2) + packet_res0 = func.packetOp(packet_res0, eval.template packet(alignedEnd2)); + } + res = func.predux(packet_res0); + + for (Index index = 0; index < alignedStart; ++index) res = func(res, eval.coeff(index)); + + for (Index index = alignedEnd; index < size; ++index) res = func(res, eval.coeff(index)); + } else // too small to vectorize anything. + // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. + { + res = eval.coeff(0); + for (Index index = 1; index < size; ++index) res = func(res, eval.coeff(index)); + } + + return res; + } +}; + +// NOTE: for SliceVectorizedTraversal we simply bypass unrolling +template +struct redux_impl { + typedef typename Evaluator::Scalar Scalar; + typedef typename redux_traits::PacketType PacketType; + + template + EIGEN_DEVICE_FUNC static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) { + eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix"); + constexpr Index packetSize = redux_traits::PacketSize; + const Index innerSize = xpr.innerSize(); + const Index outerSize = xpr.outerSize(); + const Index packetedInnerSize = ((innerSize) / packetSize) * packetSize; + Scalar res; + if (packetedInnerSize) { + PacketType packet_res = eval.template packet(0, 0); + for (Index j = 0; j < outerSize; ++j) + for (Index i = (j == 0 ? packetSize : 0); i < packetedInnerSize; i += Index(packetSize)) + packet_res = func.packetOp(packet_res, eval.template packetByOuterInner(j, i)); + + res = func.predux(packet_res); + for (Index j = 0; j < outerSize; ++j) + for (Index i = packetedInnerSize; i < innerSize; ++i) res = func(res, eval.coeffByOuterInner(j, i)); + } else // too small to vectorize anything. 
+ // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. + { + res = redux_impl::run(eval, func, xpr); + } + + return res; + } +}; + +template +struct redux_impl { + typedef typename Evaluator::Scalar Scalar; + + typedef typename redux_traits::PacketType PacketType; + static constexpr Index PacketSize = redux_traits::PacketSize; + static constexpr Index Size = Evaluator::SizeAtCompileTime; + static constexpr Index VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize); + + template + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) { + EIGEN_ONLY_USED_FOR_DEBUG(xpr) + eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix"); + if (VectorizedSize > 0) { + Scalar res = func.predux( + redux_vec_linear_unroller::template run(eval, func)); + if (VectorizedSize != Size) + res = func( + res, redux_novec_linear_unroller::run(eval, func)); + return res; + } else { + return redux_novec_linear_unroller::run(eval, func); + } + } +}; + +// evaluator adaptor +template +class redux_evaluator : public internal::evaluator { + typedef internal::evaluator Base; + + public: + typedef XprType_ XprType; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit redux_evaluator(const XprType& xpr) : Base(xpr) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketScalar PacketScalar; + + enum { + MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = XprType::MaxColsAtCompileTime, + // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime + // from the evaluator + Flags = Base::Flags & ~DirectAccessBit, + IsRowMajor = XprType::IsRowMajor, + SizeAtCompileTime = XprType::SizeAtCompileTime, + InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const { + return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetByOuterInner(Index outer, Index inner) const { + return Base::template packet(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); + } +}; + +} // end namespace internal + +/*************************************************************************** + * Part 4 : public API + ***************************************************************************/ + +/** \returns the result of a full redux operation on the whole matrix or vector using \a func + * + * The template parameter \a BinaryOp is the type of the functor \a func which must be + * an associative operator. Both current C++98 and C++11 functor styles are handled. + * + * \warning the matrix must be not empty, otherwise an assertion is triggered. 
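+ * + * A minimal illustrative sketch (hypothetical snippet relying on the C++11 functor support noted above): + * \code + * Eigen::Matrix2d m; + * m << 1, 2, + *      3, 4; + * double sum = m.redux([](double a, double b) { return a + b; }); // 10 + * \endcode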
+ * + * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise() + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::redux( + const Func& func) const { + eigen_assert(this->rows() > 0 && this->cols() > 0 && "you are using an empty matrix"); + + typedef typename internal::redux_evaluator ThisEvaluator; + ThisEvaluator thisEval(derived()); + + // The initial expression is passed to the reducer as an additional argument instead of + // passing it as a member of redux_evaluator to help + return internal::redux_impl::run(thisEval, func, derived()); +} + +/** \returns the minimum of all coefficients of \c *this. + * In case \c *this contains NaN, NaNPropagation determines the behavior: + * NaNPropagation == PropagateFast : undefined + * NaNPropagation == PropagateNaN : result is NaN + * NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN + * \warning the matrix must be not empty, otherwise an assertion is triggered. + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::minCoeff() const { + return derived().redux(Eigen::internal::scalar_min_op()); +} + +/** \returns the maximum of all coefficients of \c *this. + * In case \c *this contains NaN, NaNPropagation determines the behavior: + * NaNPropagation == PropagateFast : undefined + * NaNPropagation == PropagateNaN : result is NaN + * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN + * \warning the matrix must be not empty, otherwise an assertion is triggered. + */ +template +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::maxCoeff() const { + return derived().redux(Eigen::internal::scalar_max_op()); +} + +/** \returns the sum of all coefficients of \c *this + * + * If \c *this is empty, then the value 0 is returned. + * + * \sa trace(), prod(), mean() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::sum() const { + if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(0); + return derived().redux(Eigen::internal::scalar_sum_op()); +} + +/** \returns the mean of all coefficients of *this + * + * \sa trace(), prod(), sum() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::mean() const { +#ifdef __INTEL_COMPILER +#pragma warning push +#pragma warning(disable : 2259) +#endif + return Scalar(derived().redux(Eigen::internal::scalar_sum_op())) / Scalar(this->size()); +#ifdef __INTEL_COMPILER +#pragma warning pop +#endif +} + +/** \returns the product of all coefficients of *this + * + * Example: \include MatrixBase_prod.cpp + * Output: \verbinclude MatrixBase_prod.out + * + * \sa sum(), mean(), trace() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::prod() const { + if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(1); + return derived().redux(Eigen::internal::scalar_product_op()); +} + +/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. + * + * \c *this can be any matrix, not necessarily square. 
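+ * + * For instance (illustrative, including the non-square case mentioned above): + * \code + * Eigen::Matrix<double, 2, 3> m; + * m << 1, 2, 3, + *      4, 5, 6; + * double t = m.trace(); // sum of the main diagonal: 1 + 5 == 6 + * \endcode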
+ * + * \sa diagonal(), sum() + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar MatrixBase::trace() const { + return derived().diagonal().sum(); +} + +} // end namespace Eigen + +#endif // EIGEN_REDUX_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Ref.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Ref.h new file mode 100644 index 0000000000000000000000000000000000000000..129bc85f46fd59cfe2398c48b933dc46914e80e0 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Ref.h @@ -0,0 +1,383 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REF_H +#define EIGEN_REF_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct traits > + : public traits > { + typedef PlainObjectType_ PlainObjectType; + typedef StrideType_ StrideType; + enum { + Options = Options_, + Flags = traits >::Flags | NestByRefBit, + Alignment = traits >::Alignment, + InnerStrideAtCompileTime = traits >::InnerStrideAtCompileTime, + OuterStrideAtCompileTime = traits >::OuterStrideAtCompileTime + }; + + template + struct match { + enum { + IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime, + HasDirectAccess = internal::has_direct_access::ret, + StorageOrderMatch = + IsVectorAtCompileTime || ((PlainObjectType::Flags & RowMajorBit) == (Derived::Flags & RowMajorBit)), + InnerStrideMatch = int(InnerStrideAtCompileTime) == int(Dynamic) || + int(InnerStrideAtCompileTime) == int(Derived::InnerStrideAtCompileTime) || + (int(InnerStrideAtCompileTime) == 0 && int(Derived::InnerStrideAtCompileTime) == 1), + OuterStrideMatch = IsVectorAtCompileTime || int(OuterStrideAtCompileTime) == int(Dynamic) || + int(OuterStrideAtCompileTime) == int(Derived::OuterStrideAtCompileTime), + // NOTE, this indirection of evaluator::Alignment is needed + // to workaround a very strange bug in MSVC related to the instantiation + // of has_*ary_operator in evaluator. + // This line is surprisingly very sensitive. For instance, simply adding parenthesis + // as "DerivedAlignment = (int(evaluator::Alignment))," will make MSVC fail... + DerivedAlignment = int(evaluator::Alignment), + AlignmentMatch = (int(traits::Alignment) == int(Unaligned)) || + (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should + // be replaced by the required alignment + ScalarTypeMatch = internal::is_same::value, + MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && + AlignmentMatch && ScalarTypeMatch + }; + typedef std::conditional_t type; + }; +}; + +template +struct traits > : public traits {}; + +} // namespace internal + +template +class RefBase : public MapBase { + typedef typename internal::traits::PlainObjectType PlainObjectType; + typedef typename internal::traits::StrideType StrideType; + + public: + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(RefBase) + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { + return StrideType::InnerStrideAtCompileTime != 0 ? 
m_stride.inner() : 1; + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { + return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() + : IsVectorAtCompileTime ? this->size() + : int(Flags) & RowMajorBit ? this->cols() + : this->rows(); + } + + EIGEN_DEVICE_FUNC RefBase() + : Base(0, RowsAtCompileTime == Dynamic ? 0 : RowsAtCompileTime, + ColsAtCompileTime == Dynamic ? 0 : ColsAtCompileTime), + // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values: + m_stride(StrideType::OuterStrideAtCompileTime == Dynamic ? 0 : StrideType::OuterStrideAtCompileTime, + StrideType::InnerStrideAtCompileTime == Dynamic ? 0 : StrideType::InnerStrideAtCompileTime) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase) + + protected: + typedef Stride StrideBase; + + // Resolves inner stride if default 0. + static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveInnerStride(Index inner) { return inner == 0 ? 1 : inner; } + + // Resolves outer stride if default 0. + static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols, + bool isVectorAtCompileTime, bool isRowMajor) { + return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer; + } + + // Returns true if construction is valid, false if there is a stride mismatch, + // and fails if there is a size mismatch. + template + EIGEN_DEVICE_FUNC bool construct(Expression& expr) { + // Check matrix sizes. If this is a compile-time vector, we do allow + // implicitly transposing. + EIGEN_STATIC_ASSERT(EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression) + // If it is a vector, the transpose sizes might match. + || (PlainObjectType::IsVectorAtCompileTime && + ((int(PlainObjectType::RowsAtCompileTime) == Eigen::Dynamic || + int(Expression::ColsAtCompileTime) == Eigen::Dynamic || + int(PlainObjectType::RowsAtCompileTime) == int(Expression::ColsAtCompileTime)) && + (int(PlainObjectType::ColsAtCompileTime) == Eigen::Dynamic || + int(Expression::RowsAtCompileTime) == Eigen::Dynamic || + int(PlainObjectType::ColsAtCompileTime) == int(Expression::RowsAtCompileTime)))), + YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES) + + // Determine runtime rows and columns. + Index rows = expr.rows(); + Index cols = expr.cols(); + if (PlainObjectType::RowsAtCompileTime == 1) { + eigen_assert(expr.rows() == 1 || expr.cols() == 1); + rows = 1; + cols = expr.size(); + } else if (PlainObjectType::ColsAtCompileTime == 1) { + eigen_assert(expr.rows() == 1 || expr.cols() == 1); + rows = expr.size(); + cols = 1; + } + // Verify that the sizes are valid. + eigen_assert((PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows)); + eigen_assert((PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols)); + + // If this is a vector, we might be transposing, which means that stride should swap. + const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows()); + // If the storage format differs, we also need to swap the stride. + const bool row_major = ((PlainObjectType::Flags)&RowMajorBit) != 0; + const bool expr_row_major = (Expression::Flags & RowMajorBit) != 0; + const bool storage_differs = (row_major != expr_row_major); + + const bool swap_stride = (transpose != storage_differs); + + // Determine expr's actual strides, resolving any defaults if zero. 
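+    // Worked example (illustrative, not part of the upstream sources): for a
+    // column-major 4x5 MatrixXf, expr.innerStride() == 1 and expr.outerStride() == 4
+    // pass through the resolvers unchanged, while compile-time defaults of 0 would
+    // resolve to an inner stride of 1 and, for a non-vector column-major 4x5
+    // expression, an outer stride of 1 * 4 == 4 (the leading dimension).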
+ const Index expr_inner_actual = resolveInnerStride(expr.innerStride()); + const Index expr_outer_actual = resolveOuterStride(expr_inner_actual, expr.outerStride(), expr.rows(), expr.cols(), + Expression::IsVectorAtCompileTime != 0, expr_row_major); + + // If this is a column-major row vector or row-major column vector, the inner-stride + // is arbitrary, so set it to either the compile-time inner stride or 1. + const bool row_vector = (rows == 1); + const bool col_vector = (cols == 1); + const Index inner_stride = + ((!row_major && row_vector) || (row_major && col_vector)) + ? (StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1) + : swap_stride ? expr_outer_actual + : expr_inner_actual; + + // If this is a column-major column vector or row-major row vector, the outer-stride + // is arbitrary, so set it to either the compile-time outer stride or vector size. + const Index outer_stride = + ((!row_major && col_vector) || (row_major && row_vector)) + ? (StrideType::OuterStrideAtCompileTime > 0 ? Index(StrideType::OuterStrideAtCompileTime) + : rows * cols * inner_stride) + : swap_stride ? expr_inner_actual + : expr_outer_actual; + + // Check if given inner/outer strides are compatible with compile-time strides. + const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic) || + (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride); + if (!inner_valid) { + return false; + } + + const bool outer_valid = + (StrideType::OuterStrideAtCompileTime == Dynamic) || + (resolveOuterStride(inner_stride, Index(StrideType::OuterStrideAtCompileTime), rows, cols, + PlainObjectType::IsVectorAtCompileTime != 0, row_major) == outer_stride); + if (!outer_valid) { + return false; + } + + internal::construct_at(this, expr.data(), rows, cols); + internal::construct_at(&m_stride, (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride, + (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride); + return true; + } + + StrideBase m_stride; +}; + +/** \class Ref + * \ingroup Core_Module + * + * \brief A matrix or vector expression mapping an existing expression + * + * \tparam PlainObjectType the equivalent matrix type of the mapped data + * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32, + * \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned. \tparam StrideType optionally specifies + * strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), but accepts a + * variable outer stride (leading dimension). This can be overridden by specifying strides. The type passed here must be + * a specialization of the Stride template, see examples below. + * + * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the + * number of copies. A Ref<> object can represent either a const expression or a l-value: \code + * // in-out argument: + * void foo1(Ref x); + * + * // read-only const argument: + * void foo2(const Ref& x); + * \endcode + * + * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation + * issue will be triggered. By default, a Ref can reference any dense vector expression of float having a + * contiguous memory layout. 
Likewise, a Ref can reference any column-major dense matrix expression of float + * whose column's elements are contiguously stored with the possibility to have a constant space in-between each column, + * i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) can be greater than the number + * of rows. + * + * In the const case, if the input expression does not match the above requirement, then it is evaluated into a + * temporary before being passed to the function. Here are some examples: \code MatrixXf A; VectorXf a; foo1(a.head()); + * // OK foo1(A.col()); // OK foo1(A.row()); // Compilation error because here innerstride!=1 + * foo2(A.row()); // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object + * foo2(A.row().transpose()); // The row is copied into a contiguous temporary + * foo2(2*a); // The expression is evaluated into a temporary + * foo2(A.col().segment(2,4)); // No temporary + * \endcode + * + * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters. + * Here is an example accepting an innerstride!=1: + * \code + * // in-out argument: + * void foo3(Ref > x); + * foo3(A.row()); // OK + * \endcode + * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to + * exploit vectorization, and will involve more expensive address computations even if the input is contiguously stored + * in memory. To overcome this issue, one might propose to overload internally calling a template function, e.g.: \code + * // in the .h: + * void foo(const Ref& A); + * void foo(const Ref >& A); + * + * // in the .cpp: + * template void foo_impl(const TypeOfA& A) { + * ... // crazy code goes here + * } + * void foo(const Ref& A) { foo_impl(A); } + * void foo(const Ref >& A) { foo_impl(A); } + * \endcode + * + * See also the following stackoverflow questions for further references: + * - Correct usage of the + * Eigen::Ref<> class + * + * \sa PlainObjectBase::Map(), \ref TopicStorageOrders + */ +template +class Ref : public RefBase > { + private: + typedef internal::traits Traits; + template + EIGEN_DEVICE_FUNC inline Ref( + const PlainObjectBase& expr, + std::enable_if_t::MatchAtCompileTime), Derived>* = 0); + + public: + typedef RefBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Ref) + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_DEVICE_FUNC inline Ref( + PlainObjectBase& expr, + std::enable_if_t::MatchAtCompileTime), Derived>* = 0) { + EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + // Construction must pass since we will not create temporary storage in the non-const case. + const bool success = Base::construct(expr.derived()); + EIGEN_UNUSED_VARIABLE(success) + eigen_assert(success); + } + template + EIGEN_DEVICE_FUNC inline Ref( + const DenseBase& expr, + std::enable_if_t::MatchAtCompileTime), Derived>* = 0) +#else + /** Implicit constructor from any dense expression */ + template + inline Ref(DenseBase& expr) +#endif + { + EIGEN_STATIC_ASSERT(bool(internal::is_lvalue::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + // Construction must pass since we will not create temporary storage in the non-const case. 
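+    // For instance (illustrative, assuming a MatrixXf A): Ref<VectorXf> r(A.col(2))
+    // binds directly, whereas Ref<VectorXf> r2(A.row(1)) is rejected at compile
+    // time because a row of a column-major matrix has inner stride != 1.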
+ const bool success = Base::construct(expr.const_cast_derived()); + EIGEN_UNUSED_VARIABLE(success) + eigen_assert(success); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref) +}; + +// this is the const ref version +template +class Ref + : public RefBase > { + typedef internal::traits Traits; + + static constexpr bool may_map_m_object_successfully = + (static_cast(StrideType::InnerStrideAtCompileTime) == 0 || + static_cast(StrideType::InnerStrideAtCompileTime) == 1 || + static_cast(StrideType::InnerStrideAtCompileTime) == Dynamic) && + (TPlainObjectType::IsVectorAtCompileTime || static_cast(StrideType::OuterStrideAtCompileTime) == 0 || + static_cast(StrideType::OuterStrideAtCompileTime) == Dynamic || + static_cast(StrideType::OuterStrideAtCompileTime) == + static_cast(TPlainObjectType::InnerSizeAtCompileTime) || + static_cast(TPlainObjectType::InnerSizeAtCompileTime) == Dynamic); + + public: + typedef RefBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Ref) + + template + EIGEN_DEVICE_FUNC inline Ref(const DenseBase& expr, + std::enable_if_t::ScalarTypeMatch), Derived>* = 0) { + // std::cout << match_helper::HasDirectAccess << "," << match_helper::OuterStrideMatch << "," + // << match_helper::InnerStrideMatch << "\n"; std::cout << int(StrideType::OuterStrideAtCompileTime) + // << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; std::cout << + // int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n"; + EIGEN_STATIC_ASSERT(Traits::template match::type::value || may_map_m_object_successfully, + STORAGE_LAYOUT_DOES_NOT_MATCH); + construct(expr.derived(), typename Traits::template match::type()); + } + + EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) { + // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy + } + + EIGEN_DEVICE_FUNC inline Ref(Ref&& other) { + if (other.data() == other.m_object.data()) { + m_object = std::move(other.m_object); + Base::construct(m_object); + } else + Base::construct(other); + } + + template + EIGEN_DEVICE_FUNC inline Ref(const RefBase& other) { + EIGEN_STATIC_ASSERT(Traits::template match::type::value || may_map_m_object_successfully, + STORAGE_LAYOUT_DOES_NOT_MATCH); + construct(other.derived(), typename Traits::template match::type()); + } + + protected: + template + EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::true_type) { + // Check if we can use the underlying expr's storage directly, otherwise call the copy version. + if (!Base::construct(expr)) { + construct(expr, internal::false_type()); + } + } + + template + EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) { + internal::call_assignment_no_alias(m_object, expr, internal::assign_op()); + const bool success = Base::construct(m_object); + EIGEN_ONLY_USED_FOR_DEBUG(success) + eigen_assert(success); + } + + protected: + TPlainObjectType m_object; +}; + +} // end namespace Eigen + +#endif // EIGEN_REF_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Replicate.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Replicate.h new file mode 100644 index 0000000000000000000000000000000000000000..11d7ad19e989fd97b3a1e3dbdd83fda00dbb7d0f --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Replicate.h @@ -0,0 +1,130 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REPLICATE_H +#define EIGEN_REPLICATE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { +template +struct traits > : traits { + typedef typename MatrixType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ref_selector::type MatrixTypeNested; + typedef std::remove_reference_t MatrixTypeNested_; + enum { + RowsAtCompileTime = RowFactor == Dynamic || int(MatrixType::RowsAtCompileTime) == Dynamic + ? Dynamic + : RowFactor * MatrixType::RowsAtCompileTime, + ColsAtCompileTime = ColFactor == Dynamic || int(MatrixType::ColsAtCompileTime) == Dynamic + ? Dynamic + : ColFactor * MatrixType::ColsAtCompileTime, + // FIXME we don't propagate the max sizes !!! + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + IsRowMajor = MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1 ? 1 + : MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1 ? 0 + : (MatrixType::Flags & RowMajorBit) ? 1 + : 0, + + // FIXME enable DirectAccess with negative strides? + Flags = IsRowMajor ? RowMajorBit : 0 + }; +}; +} // namespace internal + +/** + * \class Replicate + * \ingroup Core_Module + * + * \brief Expression of the multiple replication of a matrix or vector + * + * \tparam MatrixType the type of the object we are replicating + * \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic. + * \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic. + * + * This class represents an expression of the multiple replication of a matrix or vector. + * It is the return type of DenseBase::replicate() and most of the time + * this is the only way it is used. 
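+ *
+ * A small sketch (illustrative; the values are assumptions for the example):
+ * \code
+ * Eigen::RowVector2i v(1, 2);
+ * Eigen::MatrixXi r = v.replicate(2, 3); // 2x6, each row reads 1 2 1 2 1 2
+ * \endcode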
+ * + * \sa DenseBase::replicate() + */ +template +class Replicate : public internal::dense_xpr_base >::type { + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::MatrixTypeNested_ MatrixTypeNested_; + + public: + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) + typedef internal::remove_all_t NestedExpression; + + template + EIGEN_DEVICE_FUNC inline explicit Replicate(const OriginalMatrixType& matrix) + : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) { + EIGEN_STATIC_ASSERT((internal::is_same, OriginalMatrixType>::value), + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) + eigen_assert(RowFactor != Dynamic && ColFactor != Dynamic); + } + + template + EIGEN_DEVICE_FUNC inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) + : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { + EIGEN_STATIC_ASSERT((internal::is_same, OriginalMatrixType>::value), + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) + } + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } + + EIGEN_DEVICE_FUNC const MatrixTypeNested_& nestedExpression() const { return m_matrix; } + + protected: + MatrixTypeNested m_matrix; + const internal::variable_if_dynamic m_rowFactor; + const internal::variable_if_dynamic m_colFactor; +}; + +/** + * \return an expression of the replication of \c *this + * + * Example: \include MatrixBase_replicate.cpp + * Output: \verbinclude MatrixBase_replicate.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate + */ +template +template +EIGEN_DEVICE_FUNC const Replicate DenseBase::replicate() const { + return Replicate(derived()); +} + +/** + * \return an expression of the replication of each column (or row) of \c *this + * + * Example: \include DirectionWise_replicate_int.cpp + * Output: \verbinclude DirectionWise_replicate_int.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate + */ +template +EIGEN_DEVICE_FUNC const typename VectorwiseOp::ReplicateReturnType +VectorwiseOp::replicate(Index factor) const { + return typename VectorwiseOp::ReplicateReturnType( + _expression(), Direction == Vertical ? factor : 1, Direction == Horizontal ? factor : 1); +} + +} // end namespace Eigen + +#endif // EIGEN_REPLICATE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Reshaped.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Reshaped.h new file mode 100644 index 0000000000000000000000000000000000000000..4b34e16f6a496cda349245b4d8c6b9ed34a325f9 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Reshaped.h @@ -0,0 +1,398 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2017 Gael Guennebaud +// Copyright (C) 2014 yoco +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
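+
+// Usage sketch for the Reshaped expression defined below (illustrative, not part
+// of the upstream sources; assumes the Eigen 3.4-style reshaped() overloads):
+//   Eigen::MatrixXd m(2, 6);
+//   auto r = m.reshaped(4, 3);                  // 4x3 view, column-major order
+//   auto w = m.reshaped<Eigen::RowMajor>(3, 4); // elements read in row-major order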
+ +#ifndef EIGEN_RESHAPED_H +#define EIGEN_RESHAPED_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class Reshaped + * \ingroup Core_Module + * + * \brief Expression of a fixed-size or dynamic-size reshape + * + * \tparam XprType the type of the expression in which we are taking a reshape + * \tparam Rows the number of rows of the reshape we are taking at compile time (optional) + * \tparam Cols the number of columns of the reshape we are taking at compile time (optional) + * \tparam Order can be ColMajor or RowMajor, default is ColMajor. + * + * This class represents an expression of either a fixed-size or dynamic-size reshape. + * It is the return type of DenseBase::reshaped(NRowsType,NColsType) and + * most of the time this is the only way it is used. + * + * If you want to directly manipulate reshaped expressions, + * for instance if you want to write a function returning such an expression, + * it is advised to use the \em auto keyword for such use cases. + * + * Here is an example illustrating the dynamic case: + * \include class_Reshaped.cpp + * Output: \verbinclude class_Reshaped.out + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedReshaped.cpp + * Output: \verbinclude class_FixedReshaped.out + * + * \sa DenseBase::reshaped(NRowsType,NColsType) + */ + +namespace internal { + +template +struct traits > : traits { + typedef typename traits::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + enum { + MatrixRows = traits::RowsAtCompileTime, + MatrixCols = traits::ColsAtCompileTime, + RowsAtCompileTime = Rows, + ColsAtCompileTime = Cols, + MaxRowsAtCompileTime = Rows, + MaxColsAtCompileTime = Cols, + XpxStorageOrder = ((int(traits::Flags) & RowMajorBit) == RowMajorBit) ? RowMajor : ColMajor, + ReshapedStorageOrder = (RowsAtCompileTime == 1 && ColsAtCompileTime != 1) ? RowMajor + : (ColsAtCompileTime == 1 && RowsAtCompileTime != 1) ? ColMajor + : XpxStorageOrder, + HasSameStorageOrderAsXprType = (ReshapedStorageOrder == XpxStorageOrder), + InnerSize = (ReshapedStorageOrder == int(RowMajor)) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) : Dynamic, + OuterStrideAtCompileTime = Dynamic, + + HasDirectAccess = internal::has_direct_access::ret && (Order == int(XpxStorageOrder)) && + ((evaluator::Flags & LinearAccessBit) == LinearAccessBit), + + MaskPacketAccessBit = + (InnerSize == Dynamic || (InnerSize % packet_traits::size) == 0) && (InnerStrideAtCompileTime == 1) + ? PacketAccessBit + : 0, + // MaskAlignedBit = ((OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) + // == 0)) ? AlignedBit : 0, + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsRowMajorBit = (ReshapedStorageOrder == int(RowMajor)) ? RowMajorBit : 0, + FlagsDirectAccessBit = HasDirectAccess ? 
DirectAccessBit : 0, + Flags0 = traits::Flags & ((HereditaryBits & ~RowMajorBit) | MaskPacketAccessBit), + + Flags = (Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit | FlagsDirectAccessBit) + }; +}; + +template +class ReshapedImpl_dense; + +} // end namespace internal + +template +class ReshapedImpl; + +template +class Reshaped : public ReshapedImpl::StorageKind> { + typedef ReshapedImpl::StorageKind> Impl; + + public: + // typedef typename Impl::Base Base; + typedef Impl Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Reshaped) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reshaped) + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC inline Reshaped(XprType& xpr) : Impl(xpr) { + EIGEN_STATIC_ASSERT(RowsAtCompileTime != Dynamic && ColsAtCompileTime != Dynamic, + THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) + eigen_assert(Rows * Cols == xpr.rows() * xpr.cols()); + } + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC inline Reshaped(XprType& xpr, Index reshapeRows, Index reshapeCols) + : Impl(xpr, reshapeRows, reshapeCols) { + eigen_assert((RowsAtCompileTime == Dynamic || RowsAtCompileTime == reshapeRows) && + (ColsAtCompileTime == Dynamic || ColsAtCompileTime == reshapeCols)); + eigen_assert(reshapeRows * reshapeCols == xpr.rows() * xpr.cols()); + } +}; + +// The generic default implementation for dense reshape simply forward to the internal::ReshapedImpl_dense +// that must be specialized for direct and non-direct access... +template +class ReshapedImpl + : public internal::ReshapedImpl_dense >::HasDirectAccess> { + typedef internal::ReshapedImpl_dense >::HasDirectAccess> + Impl; + + public: + typedef Impl Base; + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl) + EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr) : Impl(xpr) {} + EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr, Index reshapeRows, Index reshapeCols) + : Impl(xpr, reshapeRows, reshapeCols) {} +}; + +namespace internal { + +/** \internal Internal implementation of dense Reshaped in the general case. 
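+ * Coefficient access for this case is handled by reshaped_evaluator further
+ * below, whose index_remap() converts a (row, col) position in the reshaped
+ * expression into the matching position in the nested expression. Worked
+ * example (illustrative): reshaping a column-major 4x3 matrix to 6x2,
+ * coefficient (5, 0) is linear element 0 * 6 + 5 == 5, which maps to
+ * (5 % 4, 5 / 4) == (1, 1) in the original matrix.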
*/ +template +class ReshapedImpl_dense + : public internal::dense_xpr_base >::type { + typedef Reshaped ReshapedType; + + public: + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense) + + typedef typename internal::ref_selector::non_const_type MatrixTypeNested; + typedef internal::remove_all_t NestedExpression; + + class InnerIterator; + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr) : m_xpr(xpr), m_rows(Rows), m_cols(Cols) {} + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols) + : m_xpr(xpr), m_rows(nRows), m_cols(nCols) {} + + EIGEN_DEVICE_FUNC Index rows() const { return m_rows; } + EIGEN_DEVICE_FUNC Index cols() const { return m_cols; } + +#ifdef EIGEN_PARSED_BY_DOXYGEN + /** \sa MapBase::data() */ + EIGEN_DEVICE_FUNC constexpr const Scalar* data() const; + EIGEN_DEVICE_FUNC inline Index innerStride() const; + EIGEN_DEVICE_FUNC inline Index outerStride() const; +#endif + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC const internal::remove_all_t& nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC std::remove_reference_t& nestedExpression() { return m_xpr; } + + protected: + MatrixTypeNested m_xpr; + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; +}; + +/** \internal Internal implementation of dense Reshaped in the direct access case. */ +template +class ReshapedImpl_dense : public MapBase > { + typedef Reshaped ReshapedType; + typedef typename internal::ref_selector::non_const_type XprTypeNested; + + public: + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense) + + /** Fixed-size constructor + */ + EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr) : Base(xpr.data()), m_xpr(xpr) {} + + /** Dynamic-size constructor + */ + EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols) + : Base(xpr.data(), nRows, nCols), m_xpr(xpr) {} + + EIGEN_DEVICE_FUNC const internal::remove_all_t& nestedExpression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; } + + /** \sa MapBase::innerStride() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return m_xpr.innerStride(); } + + /** \sa MapBase::outerStride() */ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { + return (((Flags & RowMajorBit) == RowMajorBit) ? this->cols() : this->rows()) * m_xpr.innerStride(); + } + + protected: + XprTypeNested m_xpr; +}; + +// Evaluators +template +struct reshaped_evaluator; + +template +struct evaluator > + : reshaped_evaluator >::HasDirectAccess> { + typedef Reshaped XprType; + typedef typename XprType::Scalar Scalar; + // TODO: should check for smaller packet types + typedef typename packet_traits::type PacketScalar; + + enum { + CoeffReadCost = evaluator::CoeffReadCost, + HasDirectAccess = traits::HasDirectAccess, + + // RowsAtCompileTime = traits::RowsAtCompileTime, + // ColsAtCompileTime = traits::ColsAtCompileTime, + // MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + // MaxColsAtCompileTime = traits::MaxColsAtCompileTime, + // + // InnerStrideAtCompileTime = traits::HasSameStorageOrderAsXprType + // ? 
int(inner_stride_at_compile_time::ret) + // : Dynamic, + // OuterStrideAtCompileTime = Dynamic, + + FlagsLinearAccessBit = + (traits::RowsAtCompileTime == 1 || traits::ColsAtCompileTime == 1 || HasDirectAccess) + ? LinearAccessBit + : 0, + FlagsRowMajorBit = (traits::ReshapedStorageOrder == int(RowMajor)) ? RowMajorBit : 0, + FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0, + Flags0 = evaluator::Flags & (HereditaryBits & ~RowMajorBit), + Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit | FlagsDirectAccessBit, + + PacketAlignment = unpacket_traits::alignment, + Alignment = evaluator::Alignment + }; + typedef reshaped_evaluator reshaped_evaluator_type; + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : reshaped_evaluator_type(xpr) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } +}; + +template +struct reshaped_evaluator + : evaluator_base > { + typedef Reshaped XprType; + + enum { + CoeffReadCost = evaluator::CoeffReadCost /* TODO + cost of index computations */, + + Flags = (evaluator::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)), + + Alignment = 0 + }; + + EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) { + EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); + } + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + typedef std::pair RowCol; + + EIGEN_DEVICE_FUNC inline RowCol index_remap(Index rowId, Index colId) const { + if (Order == ColMajor) { + const Index nth_elem_idx = colId * m_xpr.rows() + rowId; + return RowCol(nth_elem_idx % m_xpr.nestedExpression().rows(), nth_elem_idx / m_xpr.nestedExpression().rows()); + } else { + const Index nth_elem_idx = colId + rowId * m_xpr.cols(); + return RowCol(nth_elem_idx / m_xpr.nestedExpression().cols(), nth_elem_idx % m_xpr.nestedExpression().cols()); + } + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const { + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.coeff(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { + const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0); + return m_argImpl.coeffRef(row_col.first, row_col.second); + } + + EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { + const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? 
index : 0); + return m_argImpl.coeff(row_col.first, row_col.second); + } +#if 0 + EIGEN_DEVICE_FUNC + template + inline PacketScalar packet(Index rowId, Index colId) const + { + const RowCol row_col = index_remap(rowId, colId); + return m_argImpl.template packet(row_col.first, row_col.second); + + } + + template + EIGEN_DEVICE_FUNC + inline void writePacket(Index rowId, Index colId, const PacketScalar& val) + { + const RowCol row_col = index_remap(rowId, colId); + m_argImpl.const_cast_derived().template writePacket + (row_col.first, row_col.second, val); + } + + template + EIGEN_DEVICE_FUNC + inline PacketScalar packet(Index index) const + { + const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + return m_argImpl.template packet(row_col.first, row_col.second); + } + + template + EIGEN_DEVICE_FUNC + inline void writePacket(Index index, const PacketScalar& val) + { + const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index, + RowsAtCompileTime == 1 ? index : 0); + return m_argImpl.template packet(row_col.first, row_col.second, val); + } +#endif + protected: + evaluator m_argImpl; + const XprType& m_xpr; +}; + +template +struct reshaped_evaluator + : mapbase_evaluator, + typename Reshaped::PlainObject> { + typedef Reshaped XprType; + typedef typename XprType::Scalar Scalar; + + EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) + : mapbase_evaluator(xpr) { + // TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta + // lifetime + eigen_assert(((std::uintptr_t(xpr.data()) % plain_enum_max(1, evaluator::Alignment)) == 0) && + "data is not aligned"); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_RESHAPED_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ReturnByValue.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ReturnByValue.h new file mode 100644 index 0000000000000000000000000000000000000000..3b5e470ce4275277c848fcd5824a1dead4c18587 --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/ReturnByValue.h @@ -0,0 +1,115 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// Copyright (C) 2009-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RETURNBYVALUE_H +#define EIGEN_RETURNBYVALUE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct traits > : public traits::ReturnType> { + enum { + // We're disabling the DirectAccess because e.g. the constructor of + // the Block-with-DirectAccess expression requires to have a coeffRef method. + // Also, we don't want to have to implement the stride stuff. + Flags = (traits::ReturnType>::Flags | EvalBeforeNestingBit) & ~DirectAccessBit + }; +}; + +/* The ReturnByValue object doesn't even have a coeff() method. + * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. + * So internal::nested always gives the plain return matrix type. + * + * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? 
+ * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators + */ +template +struct nested_eval, n, PlainObject> { + typedef typename traits::ReturnType type; +}; + +} // end namespace internal + +/** \class ReturnByValue + * \ingroup Core_Module + * + */ +template +class ReturnByValue : public internal::dense_xpr_base >::type, internal::no_assignment_operator { + public: + typedef typename internal::traits::ReturnType ReturnType; + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) + + template + EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { + static_cast(this)->evalTo(dst); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { + return static_cast(this)->rows(); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { + return static_cast(this)->cols(); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN +#define Unusable \ + YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT + class Unusable { + Unusable(const Unusable&) {} + Unusable& operator=(const Unusable&) { return *this; } + }; + const Unusable& coeff(Index) const { return *reinterpret_cast(this); } + const Unusable& coeff(Index, Index) const { return *reinterpret_cast(this); } + Unusable& coeffRef(Index) { return *reinterpret_cast(this); } + Unusable& coeffRef(Index, Index) { return *reinterpret_cast(this); } +#undef Unusable +#endif +}; + +template +template +EIGEN_DEVICE_FUNC Derived& DenseBase::operator=(const ReturnByValue& other) { + other.evalTo(derived()); + return derived(); +} + +namespace internal { + +// Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that +// when a ReturnByValue expression is assigned, the evaluator is not constructed. +// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world + +template +struct evaluator > : public evaluator::ReturnType> { + typedef ReturnByValue XprType; + typedef typename internal::traits::ReturnType PlainObject; + typedef evaluator Base; + + EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.rows(), xpr.cols()) { + internal::construct_at(this, m_result); + xpr.evalTo(m_result); + } + + protected: + PlainObject m_result; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_RETURNBYVALUE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Reverse.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Reverse.h new file mode 100644 index 0000000000000000000000000000000000000000..eb06fff57f7fae37d912436132fd861fe65996fc --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Reverse.h @@ -0,0 +1,202 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2009 Ricard Marxer +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_REVERSE_H +#define EIGEN_REVERSE_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +namespace internal { + +template +struct traits > : traits { + typedef typename MatrixType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ref_selector::type MatrixTypeNested; + typedef std::remove_reference_t MatrixTypeNested_; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Flags = MatrixTypeNested_::Flags & (RowMajorBit | LvalueBit) + }; +}; + +template +struct reverse_packet_cond { + static inline PacketType run(const PacketType& x) { return preverse(x); } +}; + +template +struct reverse_packet_cond { + static inline PacketType run(const PacketType& x) { return x; } +}; + +} // end namespace internal + +/** \class Reverse + * \ingroup Core_Module + * + * \brief Expression of the reverse of a vector or matrix + * + * \tparam MatrixType the type of the object of which we are taking the reverse + * \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections + * + * This class represents an expression of the reverse of a vector. + * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::reverse(), VectorwiseOp::reverse() + */ +template +class Reverse : public internal::dense_xpr_base >::type { + public: + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) + typedef internal::remove_all_t NestedExpression; + using Base::IsRowMajor; + + protected: + enum { + PacketSize = internal::packet_traits::size, + IsColMajor = !IsRowMajor, + ReverseRow = (Direction == Vertical) || (Direction == BothDirections), + ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1, + ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || + ((Direction == Horizontal) && IsRowMajor) + }; + typedef internal::reverse_packet_cond reverse_packet; + + public: + EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse) + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } + + EIGEN_DEVICE_FUNC inline Index innerStride() const { return -m_matrix.innerStride(); } + + EIGEN_DEVICE_FUNC const internal::remove_all_t& nestedExpression() const { + return m_matrix; + } + + protected: + typename MatrixType::Nested m_matrix; +}; + +/** \returns an expression of the reverse of *this. + * + * Example: \include MatrixBase_reverse.cpp + * Output: \verbinclude MatrixBase_reverse.out + * + */ +template +EIGEN_DEVICE_FUNC inline typename DenseBase::ReverseReturnType DenseBase::reverse() { + return ReverseReturnType(derived()); +} + +// reverse const overload moved DenseBase.h due to a CUDA compiler bug + +/** This is the "in place" version of reverse: it reverses \c *this. 
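+ *
+ * A minimal sketch (illustrative; the values are assumptions for the example):
+ * \code
+ * Eigen::Vector4i v(1, 2, 3, 4);
+ * v.reverseInPlace(); // v is now (4, 3, 2, 1)
+ * \endcode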
+ * + * In most cases it is probably better to simply use the reversed expression + * of a matrix. However, when reversing the matrix data itself is really needed, + * then this "in-place" version is probably the right choice because it provides + * the following additional benefits: + * - less error prone: doing the same operation with .reverse() requires special care: + * \code m = m.reverse().eval(); \endcode + * - this API enables reverse operations without the need for a temporary + * - it allows future optimizations (cache friendliness, etc.) + * + * \sa VectorwiseOp::reverseInPlace(), reverse() */ +template +EIGEN_DEVICE_FUNC inline void DenseBase::reverseInPlace() { + constexpr int HalfRowsAtCompileTime = RowsAtCompileTime == Dynamic ? Dynamic : RowsAtCompileTime / 2; + constexpr int HalfColsAtCompileTime = ColsAtCompileTime == Dynamic ? Dynamic : ColsAtCompileTime / 2; + if (cols() > rows()) { + Index half = cols() / 2; + this->template leftCols(half).swap( + this->template rightCols(half).reverse()); + if ((cols() % 2) == 1) { + Index half2 = rows() / 2; + col(half).template head(half2).swap( + col(half).template tail(half2).reverse()); + } + } else { + Index half = rows() / 2; + this->template topRows(half).swap( + this->template bottomRows(half).reverse()); + if ((rows() % 2) == 1) { + Index half2 = cols() / 2; + row(half).template head(half2).swap( + row(half).template tail(half2).reverse()); + } + } +} + +namespace internal { + +template +struct vectorwise_reverse_inplace_impl; + +template <> +struct vectorwise_reverse_inplace_impl { + template + static void run(ExpressionType& xpr) { + constexpr Index HalfAtCompileTime = + ExpressionType::RowsAtCompileTime == Dynamic ? Dynamic : ExpressionType::RowsAtCompileTime / 2; + Index half = xpr.rows() / 2; + xpr.template topRows(half).swap( + xpr.template bottomRows(half).colwise().reverse()); + } +}; + +template <> +struct vectorwise_reverse_inplace_impl { + template + static void run(ExpressionType& xpr) { + constexpr Index HalfAtCompileTime = + ExpressionType::ColsAtCompileTime == Dynamic ? Dynamic : ExpressionType::ColsAtCompileTime / 2; + Index half = xpr.cols() / 2; + xpr.template leftCols(half).swap( + xpr.template rightCols(half).rowwise().reverse()); + } +}; + +} // end namespace internal + +/** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this. + * + * In most cases it is probably better to simply use the reversed expression + * of a matrix. 
However, when reversing the matrix data itself is really needed, + * then this "in-place" version is probably the right choice because it provides + * the following additional benefits: + * - less error prone: doing the same operation with .reverse() requires special care: + * \code m = m.reverse().eval(); \endcode + * - this API enables reverse operations without the need for a temporary + * + * \sa DenseBase::reverseInPlace(), reverse() */ +template +EIGEN_DEVICE_FUNC void VectorwiseOp::reverseInPlace() { + internal::vectorwise_reverse_inplace_impl::run(m_matrix); +} + +} // end namespace Eigen + +#endif // EIGEN_REVERSE_H diff --git a/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Select.h b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Select.h new file mode 100644 index 0000000000000000000000000000000000000000..9f4612047ab29d1f385d01bf74eb4f44d39a653c --- /dev/null +++ b/thirdparty/DROID-SLAM/thirdparty/eigen/Eigen/src/Core/Select.h @@ -0,0 +1,156 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELECT_H +#define EIGEN_SELECT_H + +// IWYU pragma: private +#include "./InternalHeaderCheck.h" + +namespace Eigen { + +/** \class Select + * \ingroup Core_Module + * + * \brief Expression of a coefficient wise version of the C++ ternary operator ?: + * + * \tparam ConditionMatrixType the type of the \em condition expression which must be a boolean matrix + * \tparam ThenMatrixType the type of the \em then expression + * \tparam ElseMatrixType the type of the \em else expression + * + * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. + * It is the return type of DenseBase::select() and most of the time this is the only way it is used. + * + * \sa DenseBase::select(const DenseBase&, const DenseBase&) const + */ + +namespace internal { +template +struct traits > : traits { + typedef typename traits::Scalar Scalar; + typedef Dense StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ConditionMatrixType::Nested ConditionMatrixNested; + typedef typename ThenMatrixType::Nested ThenMatrixNested; + typedef typename ElseMatrixType::Nested ElseMatrixNested; + enum { + RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, + ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, + Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit + }; +}; +} // namespace internal + +template +class Select : public internal::dense_xpr_base >::type, + internal::no_assignment_operator { + public: + typedef typename internal::dense_xpr_base