白鹭先生 committed
Commit abd2a81
Parent(s): init
This view is limited to 50 files because it contains too many changes.
- .gitattributes +27 -0
- .vscode/settings.json +3 -0
- LICENSE.txt +94 -0
- README.md +13 -0
- app.py +48 -0
- backbone.py +71 -0
- child_processes.py +76 -0
- configs/README.md +68 -0
- configs/backbone_params.deeplab101.json +6 -0
- configs/backbone_params.deeplab50.json +6 -0
- configs/backbone_params.fcn101.json +6 -0
- configs/backbone_params.fcn50.json +6 -0
- configs/backbone_params.ictnet.json +8 -0
- configs/backbone_params.unet16.json +5 -0
- configs/backbone_params.unet_resnet101.json +9 -0
- configs/config.defaults.inria_dataset_osm_aligned.json +15 -0
- configs/config.defaults.inria_dataset_osm_mask_only.json +21 -0
- configs/config.defaults.inria_dataset_polygonized.json +15 -0
- configs/config.defaults.inria_dataset_polygonized_256.json +15 -0
- configs/config.defaults.json +40 -0
- configs/config.defaults.luxcarta_dataset.json +11 -0
- configs/config.defaults.mapping_dataset.json +11 -0
- configs/config.defaults.xview2_dataset.json +15 -0
- configs/config.inria_dataset_osm_aligned.unet_resnet101_pretrained.field_off.json +24 -0
- configs/config.inria_dataset_osm_aligned.unet_resnet101_pretrained.json +24 -0
- configs/config.inria_dataset_osm_mask_only.unet16.json +17 -0
- configs/config.inria_dataset_polygonized.ictnet.leaderboard.json +38 -0
- configs/config.inria_dataset_polygonized.ictnet.rmsprop.leaderboard.field_off.json +39 -0
- configs/config.inria_dataset_polygonized.ictnet.rmsprop.leaderboard.json +39 -0
- configs/config.inria_dataset_polygonized.unet_resnet101.leaderboard.json +33 -0
- configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.field_off.json +18 -0
- configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.json +18 -0
- configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.leaderboard.field_off.json +35 -0
- configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.leaderboard.json +35 -0
- configs/config.inria_dataset_polygonized_small.unet_resnet101_pretrained.json +19 -0
- configs/config.inria_dataset_polygonized_small.unet_resnet101_pretrained.no_aug.json +23 -0
- configs/config.inria_dataset_small.unet16.json +19 -0
- configs/config.luxcarta_dataset.unet16.field_off.json +15 -0
- configs/config.luxcarta_dataset.unet16.json +15 -0
- configs/config.mapping_dataset.asip.json +5 -0
- configs/config.mapping_dataset.deeplab101.field_off.json +15 -0
- configs/config.mapping_dataset.deeplab101.field_off.train_val.json +16 -0
- configs/config.mapping_dataset.deeplab101.json +16 -0
- configs/config.mapping_dataset.deeplab101.train_val.json +15 -0
- configs/config.mapping_dataset.fcn101.json +89 -0
- configs/config.mapping_dataset.fcn50.json +89 -0
- configs/config.mapping_dataset.open_solution.json +5 -0
- configs/config.mapping_dataset.open_solution_full.json +5 -0
- configs/config.mapping_dataset.polymapper.json +5 -0
- configs/config.mapping_dataset.unet16.coupling_losses_0.4.json +88 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
.vscode/settings.json
ADDED
@@ -0,0 +1,3 @@
{
    "git.ignoreLimitWarning": true
}
LICENSE.txt
ADDED
@@ -0,0 +1,94 @@
***
CONTRAT DE LICENCE DE LOGICIEL

Logiciel PFFL ©Inria 2020, tout droit réservé, ci-après dénommé "le Logiciel".

Le Logiciel a été conçu et réalisé par des chercheurs de l'équipe-projet TITANE d'Inria (Institut National de Recherche en Informatique et Automatique).

Inria, Domaine de Voluceau, Rocquencourt - BP 105
78153 Le Chesnay Cedex, France

Inria détient tous les droits de propriété sur le Logiciel.

Le Logiciel a été déposé auprès de l'Agence pour la Protection des Programmes (APP) sous le numéro <numéro APP>.

Le Logiciel est en cours de développement et Inria souhaite qu'il soit utilisé par la communauté scientifique de façon à le tester et l'évaluer, et afin qu'Inria puisse le cas échéant le faire évoluer.

A cette fin, Inria a décidé de distribuer le Logiciel.

Inria concède à l'utilisateur académique, gratuitement, sans droit de sous-licence, pour une période de un (1) an à compter du téléchargement du code source, le droit non-exclusif d'utiliser le Logiciel à fins de recherche. Toute autre utilisation sans l'accord préalable d'Inria est exclue.

L'utilisateur académique reconnaît expressément avoir reçu d'Inria toutes les informations lui permettant d'apprécier l'adéquation du Logiciel à ses besoins et de prendre toutes les précautions utiles pour sa mise en œuvre et son utilisation.

Le Logiciel est distribué sous forme d'un fichier source.

Si le Logiciel est utilisé pour la publication de résultats, l'utilisateur devra citer le Logiciel de la façon suivante :

@misc{girard2020polygonal,
    title={Polygonal Building Segmentation by Frame Field Learning},
    author={Nicolas Girard and Dmitriy Smirnov and Justin Solomon and Yuliya Tarabalka},
    year={2020},
    eprint={2004.14875},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}


Tout utilisateur du Logiciel pourra communiquer ses remarques d'utilisation du Logiciel aux développeurs de PFFL : [email protected]


L'UTILISATEUR NE PEUT FAIRE NI UTILISATION NI EXPLOITATION NI DISTRIBUTION COMMERCIALE DU LOGICIEL SANS L'ACCORD EXPRÈS PRÉALABLE d'INRIA ([email protected]).
TOUT ACTE CONTRAIRE CONSTITUERAIT UNE CONTREFAÇON.

LE LOGICIEL EST FOURNI "TEL QU'EN L'ÉTAT" SANS AUCUNE GARANTIE DE QUELQUE NATURE, IMPLICITE OU EXPLICITE, QUANT À SON UTILISATION COMMERCIALE, PROFESSIONNELLE, LÉGALE OU NON, OU AUTRE, SA COMMERCIALISATION OU SON ADAPTATION.

SAUF LORSQU'EXPLICITEMENT PRÉVU PAR LA LOI, INRIA NE POURRA ÊTRE TENU POUR RESPONSABLE DE TOUT DOMMAGE OU PRÉJUDICE DIRECT, INDIRECT, (PERTES FINANCIÈRES DUES AU MANQUE À GAGNER, À L'INTERRUPTION D'ACTIVITÉS OU À LA PERTE DE DONNÉES, ETC...) DÉCOULANT DE L'UTILISATION DE TOUT OU PARTIE DU LOGICIEL OU DE L'IMPOSSIBILITÉ D'UTILISER CELUI-CI.

***

SOFTWARE LICENSE AGREEMENT


Software PFFL ©Inria – 2020, all rights reserved, hereinafter "the Software".

This software has been developed by researchers of TITANE project team of Inria (Institut National de Recherche en Informatique et Automatique).

Inria, Domaine de Voluceau, Rocquencourt - BP 105
78153 Le Chesnay Cedex, FRANCE


Inria holds all the ownership rights on the Software.

The Software has been registered with the Agence pour la Protection des Programmes (APP) under <APP number>.

The Software is still being currently developed. It is the Inria's aim for the Software to be used by the scientific community so as to test it and, evaluate it so that Inria may improve it.

For these reasons Inria has decided to distribute the Software.

Inria grants to the academic user, a free of charge, without right to sublicense non-exclusive right to use the Software for research purposes for a period of one (1) year from the date of the download of the source code. Any other use without of prior consent of Inria is prohibited.

The academic user explicitly acknowledges having received from Inria all information allowing him to appreciate the adequacy between of the Software and his needs and to undertake all necessary precautions for his execution and use.

The Software is provided only as a source.

In case of using the Software for a publication or other results obtained through the use of the Software, user should cite the Software as follows :

@misc{girard2020polygonal,
    title={Polygonal Building Segmentation by Frame Field Learning},
    author={Nicolas Girard and Dmitriy Smirnov and Justin Solomon and Yuliya Tarabalka},
    year={2020},
    eprint={2004.14875},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}


Every user of the Software could communicate to the developers of PFFL [[email protected]] his or her remarks as to the use of the Software.

THE USER CANNOT USE, EXPLOIT OR COMMERCIALY DISTRIBUTE THE SOFTWARE WITHOUT PRIOR AND EXPLICIT CONSENT OF INRIA ([email protected]). ANY SUCH ACTION WILL CONSTITUTE A FORGERY.

THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL USE, PROFESSIONNAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR ADAPTATION.

UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
README.md
ADDED
@@ -0,0 +1,13 @@
---
title: Luuu
emoji: 🌍
colorFrom: red
colorTo: purple
sdk: gradio
sdk_version: 2.8.12
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
ADDED
@@ -0,0 +1,48 @@
'''
Author: Egrt
Date: 2022-03-19 10:23:48
LastEditors: Egrt
LastEditTime: 2022-03-20 13:41:53
FilePath: \Luuu\app.py
'''

from gis import GIS
import gradio as gr
import os
os.system('pip install -r requirements.txt')
from zipfile import ZipFile
gis = GIS()


# -------- Model inference ---------- #
def inference(filepath):
    filename, file_list = gis.detect_image(filepath)
    with ZipFile("result.zip", "w") as zipObj:
        zipObj.write(file_list[0], "{}.tif".format(filename + 'mask'))
        zipObj.write(file_list[1], "{}.tif".format(filename))
        zipObj.write(file_list[2], "{}.pdf".format(filename))
        zipObj.write(file_list[3], "{}.cpg".format(filename))
        zipObj.write(file_list[4], "{}.dbf".format(filename))
        zipObj.write(file_list[5], "{}.shx".format(filename))
        zipObj.write(file_list[6], "{}.shp".format(filename))
        zipObj.write(file_list[7], "{}.prj".format(filename))
    return "images/result.zip"


# -------- Web page info ---------- #
title = "Polygonal Building Extraction Based on Frame Field Learning"
description = "State-of-the-art image segmentation models usually output segmentations in raster form, but applications in geographic information systems usually require vector polygons. For the task of extracting buildings from remote sensing images, we add a frame field output to a deep segmentation model and align the predicted frame field with the ground-truth contours, which helps reduce the gap between the deep network's output and the output format required by downstream tasks. @Luuuu🐋🐋"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2004.14875' target='_blank'>Polygonization-by-Frame-Field-Learning</a> | <a href='https://github.com/JingyunLiang/SwinIR' target='_blank'>Github Repo</a></p>"
example_img_dir = 'images'
example_img_name = os.listdir(example_img_dir)
examples = [[os.path.join(example_img_dir, image_path)] for image_path in example_img_name if image_path.endswith('.png')]
gr.Interface(
    inference,
    [gr.inputs.Image(type="filepath", label="Input image")],
    gr.outputs.File(label="Detection result"),
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples
).launch(debug=True)
backbone.py
ADDED
@@ -0,0 +1,71 @@
import os

import torch
import torchvision

from lydorn_utils import print_utils


def get_backbone(backbone_params):
    set_download_dir()
    if backbone_params["name"] == "unet":
        from torchvision.models.segmentation._utils import _SimpleSegmentationModel
        from frame_field_learning.unet import UNetBackbone

        backbone = UNetBackbone(backbone_params["input_features"], backbone_params["features"])
        backbone = _SimpleSegmentationModel(backbone, classifier=torch.nn.Identity())
    elif backbone_params["name"] == "fcn50":
        backbone = torchvision.models.segmentation.fcn_resnet50(pretrained=backbone_params["pretrained"],
                                                                num_classes=21)
        backbone.classifier = torch.nn.Sequential(*list(backbone.classifier.children())[:-1],
                                                  torch.nn.Conv2d(512, backbone_params["features"], kernel_size=(1, 1),
                                                                  stride=(1, 1)))
    elif backbone_params["name"] == "fcn101":
        backbone = torchvision.models.segmentation.fcn_resnet101(pretrained=backbone_params["pretrained"],
                                                                 num_classes=21)
        backbone.classifier = torch.nn.Sequential(*list(backbone.classifier.children())[:-1],
                                                  torch.nn.Conv2d(512, backbone_params["features"], kernel_size=(1, 1),
                                                                  stride=(1, 1)))

    elif backbone_params["name"] == "deeplab50":
        backbone = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=backbone_params["pretrained"],
                                                                      num_classes=21)
        backbone.classifier = torch.nn.Sequential(*list(backbone.classifier.children())[:-1],
                                                  torch.nn.Conv2d(256, backbone_params["features"], kernel_size=(1, 1),
                                                                  stride=(1, 1)))
    elif backbone_params["name"] == "deeplab101":
        backbone = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=backbone_params["pretrained"],
                                                                       num_classes=21)
        backbone.classifier = torch.nn.Sequential(*list(backbone.classifier.children())[:-1],
                                                  torch.nn.Conv2d(256, backbone_params["features"], kernel_size=(1, 1),
                                                                  stride=(1, 1)))
    elif backbone_params["name"] == "unet_resnet":
        from torchvision.models.segmentation._utils import _SimpleSegmentationModel
        from frame_field_learning.unet_resnet import UNetResNetBackbone

        backbone = UNetResNetBackbone(backbone_params["encoder_depth"], num_filters=backbone_params["num_filters"],
                                      dropout_2d=backbone_params["dropout_2d"],
                                      pretrained=backbone_params["pretrained"],
                                      is_deconv=backbone_params["is_deconv"])
        backbone = _SimpleSegmentationModel(backbone, classifier=torch.nn.Identity())

    elif backbone_params["name"] == "ictnet":
        from torchvision.models.segmentation._utils import _SimpleSegmentationModel
        from frame_field_learning.ictnet import ICTNetBackbone

        backbone = ICTNetBackbone(in_channels=backbone_params["in_channels"],
                                  out_channels=backbone_params["out_channels"],
                                  preset_model=backbone_params["preset_model"],
                                  dropout_2d=backbone_params["dropout_2d"],
                                  efficient=backbone_params["efficient"])
        backbone = _SimpleSegmentationModel(backbone, classifier=torch.nn.Identity())
    else:
        print_utils.print_error("ERROR: config[\"backbone_params\"][\"name\"] = \"{}\" is an unknown backbone!"
                                "If it is a new backbone you want to use, "
                                "add it in backbone.py's get_backbone() function.".format(backbone_params["name"]))
        raise RuntimeError("Specified backbone {} unknown".format(backbone_params["name"]))
    return backbone


def set_download_dir():
    os.environ['TORCH_HOME'] = 'models'  # setting the environment variable
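For context, the following is a minimal usage sketch (not part of the commit) showing how `get_backbone` above could be driven by one of the `configs/backbone_params.*.json` files added later in this diff. It assumes plain `json.load` is sufficient for a comment-free config and that the `frame_field_learning` package imported inside `get_backbone` is installed; both are assumptions, since the project appears to use its own config loader.

```python
# Hypothetical usage sketch, not part of this commit.
import json

from backbone import get_backbone

# backbone_params.unet16.json contains no "//" comments, so plain json.load works here;
# configs that contain comments would need the project's own config loader instead.
with open("configs/backbone_params.unet16.json") as f:
    backbone_params = json.load(f)  # {"name": "unet", "input_features": 3, "features": 16}

backbone = get_backbone(backbone_params)  # _SimpleSegmentationModel wrapping a UNetBackbone
print(type(backbone).__name__)
```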
child_processes.py
ADDED
@@ -0,0 +1,76 @@
import os

import torch

from lydorn_utils import python_utils
from lydorn_utils import print_utils

from backbone import get_backbone
from dataset_folds import get_folds


def train_process(gpu, config, shared_dict, barrier):
    from frame_field_learning.train import train

    print_utils.print_info("GPU {} -> Ready. There are {} GPU(s) available on this node.".format(gpu, torch.cuda.device_count()))

    torch.manual_seed(0)  # Ensure same seed for all processes
    # --- Find data directory --- #
    root_dir_candidates = [os.path.join(data_dirpath, config["dataset_params"]["root_dirname"]) for data_dirpath in config["data_dir_candidates"]]
    root_dir, paths_tried = python_utils.choose_first_existing_path(root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error("GPU {} -> ERROR: Data root directory amongst \"{}\" not found!".format(gpu, paths_tried))
        exit()
    print_utils.print_info("GPU {} -> Using data from {}".format(gpu, root_dir))

    # --- Get dataset splits
    # - CHANGE HERE TO ADD YOUR OWN DATASET
    # We have to adapt the config["fold"] param to the folds argument of the get_folds function
    fold = set(config["fold"])
    if fold == {"train"}:
        # Val will be used for evaluating the model after each epoch:
        train_ds, val_ds = get_folds(config, root_dir, folds=["train", "val"])
    elif fold == {"train", "val"}:
        # Both train and val are meant to be used for training
        train_ds, = get_folds(config, root_dir, folds=["train_val"])
        val_ds = None
    else:
        # Should not arrive here since main makes sure config["fold"] is either one of the above
        print_utils.print_error("ERROR: specified folds not recognized!")
        raise NotImplementedError

    # --- Instantiate backbone network
    if config["backbone_params"]["name"] in ["deeplab50", "deeplab101"]:
        assert 1 < config["optim_params"]["batch_size"], \
            "When using backbone {}, batch_size has to be at least 2 for the batchnorm of the ASPPPooling to work."\
            .format(config["backbone_params"]["name"])
    backbone = get_backbone(config["backbone_params"])

    # --- Launch training
    train(gpu, config, shared_dict, barrier, train_ds, val_ds, backbone)


def eval_process(gpu, config, shared_dict, barrier):
    from frame_field_learning.evaluate import evaluate

    torch.manual_seed(0)  # Ensure same seed for all processes
    # --- Find data directory --- #
    root_dir_candidates = [os.path.join(data_dirpath, config["dataset_params"]["root_dirname"]) for data_dirpath in
                           config["data_dir_candidates"]]
    root_dir, paths_tried = python_utils.choose_first_existing_path(root_dir_candidates, return_tried_paths=True)
    if root_dir is None:
        print_utils.print_error(
            "GPU {} -> ERROR: Data root directory amongst \"{}\" not found!".format(gpu, paths_tried))
        raise NotADirectoryError(f"Couldn't find a directory in {paths_tried} (gpu:{gpu})")
    print_utils.print_info("GPU {} -> Using data from {}".format(gpu, root_dir))
    config["data_root_dir"] = root_dir

    # --- Get dataset
    # - CHANGE HERE TO ADD YOUR OWN DATASET
    eval_ds, = get_folds(config, root_dir, folds=config["fold"])  # config["fold"] is already a list (of length 1)

    # --- Instantiate backbone network (its backbone will be used to extract features)
    backbone = get_backbone(config["backbone_params"])

    evaluate(gpu, config, shared_dict, barrier, eval_ds, backbone)
configs/README.md
ADDED
@@ -0,0 +1,68 @@
This folder stores all configuration variables used for launching training runs (and evaluating the results from those runs) in the form of config files.

The ```main.py``` script has a ```--config``` argument which can be the path to any of the "config.*.json" files in this folder. Of course you can also write your own config file.
Thus one "config.*.json" file points to all parameters used for the run.

As we perform quite a lot of different experiments requiring only a few parameters to change, I have set up a hierarchical system of config files allowing default values that can be overwritten.
This way a single parameter can be shared by all experiments and can be changed for all of them by modifying it in only one location.
A config is stored as a JSON dictionary. This config dictionary can store nested dictionaries of parameters.
Whenever the "defaults_filepath" key is used in a config file,
its value is assumed to be the path to another config file whose dictionary is loaded and merged with the dictionary that had the "defaults_filepath" key.
The keys alongside the "defaults_filepath" key specify parameters that should overwrite the default values loaded from the "defaults_filepath" config file.

To illustrate how this works, let's say the config folder looks like this:
```
config
|-- config.defaults.json
`-- config.my_exp_1.json
`-- config.my_exp_2.json
```

Let's say config.defaults.json is:
```json
{
    "learning_rate": 0.1,
    "batch_size": 16
}
```

And config.my_exp_1.json is:
```json
{
    "defaults_filepath": "configs/config.defaults.json"
}
```

And config.my_exp_2.json is:
```json
{
    "defaults_filepath": "configs/config.defaults.json",

    "learning_rate": 0.01
}
```

When loaded by the ```main.py``` script, they will be expanded into the following.

config.my_exp_1.json:
```json
{
    "learning_rate": 0.1,
    "batch_size": 16
}
```

config.my_exp_2.json:
```json
{
    "learning_rate": 0.01,
    "batch_size": 16
}
```

Even when the actual config files use a lot of parameters, this makes it very easy to see that all "my_exp_2" does is change the learning rate.
Also, if we want to change the batch size for all experiments, all we have to do is change its value in "config.defaults.json".
This principle of using the "defaults_filepath" key to point to another config file can be used in nested dictionary parameters as well.
A config file is thus the root of a config tree loaded recursively.
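To make the merge behaviour described above concrete, here is a small illustrative sketch. It is not the project's actual loader (which lives elsewhere in the repo); the function names `load_config`/`expand_defaults`, the shallow merge, and the use of plain `json.load` (which only works on comment-free configs such as the examples above) are all assumptions.

```python
# Illustrative sketch of the "defaults_filepath" mechanism; not the project's actual loader.
import json


def load_config(path):
    # Plain json.load: fine for the comment-free examples above; real configs with
    # "//" comments would need a more permissive parser.
    with open(path) as f:
        config = json.load(f)
    return expand_defaults(config)


def expand_defaults(node):
    if not isinstance(node, dict):
        return node
    # Recurse first so nested dicts can carry their own "defaults_filepath".
    node = {key: expand_defaults(value) for key, value in node.items()}
    defaults_path = node.pop("defaults_filepath", None)
    if defaults_path is None:
        return node
    defaults = load_config(defaults_path)  # defaults may themselves use "defaults_filepath"
    merged = dict(defaults)
    merged.update(node)  # keys next to "defaults_filepath" overwrite the loaded defaults (shallow merge, for simplicity)
    return merged


# Example from the README: config.my_exp_2.json expands to
# {"learning_rate": 0.01, "batch_size": 16}
# print(load_config("configs/config.my_exp_2.json"))
```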
configs/backbone_params.deeplab101.json
ADDED
@@ -0,0 +1,6 @@
{
    "name": "deeplab101",
    "input_features": 3,
    "features": 128,
    "pretrained": false
}
configs/backbone_params.deeplab50.json
ADDED
@@ -0,0 +1,6 @@
{
    "name": "deeplab50",
    "input_features": 3,
    "features": 128,
    "pretrained": false
}
configs/backbone_params.fcn101.json
ADDED
@@ -0,0 +1,6 @@
{
    "name": "fcn101",
    "input_features": 3,
    "features": 256,
    "pretrained": false
}
configs/backbone_params.fcn50.json
ADDED
@@ -0,0 +1,6 @@
{
    "name": "fcn50",
    "input_features": 3,
    "features": 256,
    "pretrained": false
}
configs/backbone_params.ictnet.json
ADDED
@@ -0,0 +1,8 @@
{
    "name": "ictnet",
    "preset_model": "FC-DenseNet103", // FC-DenseNet56, FC-DenseNet67 and FC-DenseNet103 are possible
    "in_channels": 3,
    "out_channels": 32,
    "dropout_2d": 0.0, // Default: 0.2
    "efficient": true // If true, use gradient checkpointing to reduce memory at the expense of some speed
}
configs/backbone_params.unet16.json
ADDED
@@ -0,0 +1,5 @@
{
    "name": "unet",
    "input_features": 3,
    "features": 16
}
configs/backbone_params.unet_resnet101.json
ADDED
@@ -0,0 +1,9 @@
{
    "name": "unet_resnet",
    "encoder_depth": 101, // 34, 101 and 152 are possible
    "input_features": 3,
    "num_filters": 32, // Default: 32
    "pretrained": false,
    "dropout_2d": 0.2, // Default: 0.2
    "is_deconv": false // Default: false
}
configs/config.defaults.inria_dataset_osm_aligned.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.inria_dataset_osm_aligned.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.inria_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    },

    "optim_params": {
        "gamma": 0.99
    }
}
configs/config.defaults.inria_dataset_osm_mask_only.json
ADDED
@@ -0,0 +1,21 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.inria_dataset_osm_mask_only.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.inria_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    },

    "optim_params": {
        "gamma": 0.99
    },

    "data_aug_params": {
        "color_jitter": false
    },

    "compute_seg": false
}
configs/config.defaults.inria_dataset_polygonized.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.inria_dataset_polygonized.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.inria_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    },

    "optim_params": {
        "gamma": 0.99
    }
}
configs/config.defaults.inria_dataset_polygonized_256.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.inria_dataset_polygonized_256.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.inria_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    },

    "optim_params": {
        "gamma": 0.99
    }
}
configs/config.defaults.json
ADDED
@@ -0,0 +1,40 @@
{
    "data_dir_candidates": [
        "/data/titane/user/nigirard/data",
        "~/data",
        "/data"
    ],
    "num_workers": null, // If null, will use multiprocess.cpu_count() workers in total
    "data_aug_params": {
        "enable": true,
        "vflip": true,
        "affine": true,
        "scaling": [0.75, 1.5], // Range of scaling factor to apply during affine transform. Set to None to not apply.
        "color_jitter": true,
        "device": "cuda"
    },

    "device": "cuda", // Only has effects when mode is val or test. When mode is train, always use CUDA
    "use_amp": false, // Automatic Mixed Precision switch

    "compute_seg": true,
    "compute_crossfield": true,

    "seg_params": {
        "compute_interior": true,
        "compute_edge": true,
        "compute_vertex": false
    },

    "loss_params": {
        "defaults_filepath": "configs/loss_params.json" // Path from the project's root to a JSON with default values for loss_params
    },

    "optim_params": {
        "defaults_filepath": "configs/optim_params.json" // Path from the project's root to a JSON with default values for optim_params
    },

    "polygonize_params": {
        "defaults_filepath": "configs/polygonize_params.json" // Path from the project's root to a JSON with default values for polygonize_params
    }
}
configs/config.defaults.luxcarta_dataset.json
ADDED
@@ -0,0 +1,11 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.luxcarta_dataset.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.luxcarta_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    }
}
configs/config.defaults.mapping_dataset.json
ADDED
@@ -0,0 +1,11 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.mapping_dataset.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.mapping_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    }
}
configs/config.defaults.xview2_dataset.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.json",

    "dataset_params": {
        "defaults_filepath": "configs/dataset_params.xview2_dataset.json" // Path from the project's root to a JSON with default values for dataset_params
    },

    "eval_params" : {
        "defaults_filepath": "configs/eval_params.xview2_dataset.json" // Path from the project's root to a JSON with default values for eval_params
    },

    "optim_params": {
        "gamma": 0.99
    }
}
configs/config.inria_dataset_osm_aligned.unet_resnet101_pretrained.field_off.json
ADDED
@@ -0,0 +1,24 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_osm_aligned.json",

    "run_name": "inria_dataset_osm_aligned.unet_resnet101_pretrained.field_off",

    "compute_crossfield": false,

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "loss_params": {
        "seg_loss_params": {
            "use_size": false
        }
    },

    "optim_params": {
        "optimizer": "RMSProp",
        "gamma": 0.99,
        "batch_size": 3 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_osm_aligned.unet_resnet101_pretrained.json
ADDED
@@ -0,0 +1,24 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_osm_aligned.json",

    "run_name": "inria_dataset_osm_aligned.unet_resnet101_pretrained",

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "loss_params": {
        "seg_loss_params": {
            "use_size": false
        }
    },

    "optim_params": {
        "optimizer": "RMSProp",
        "gamma": 0.99,
        "batch_size": 3 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_osm_mask_only.unet16.json
ADDED
@@ -0,0 +1,17 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_osm_mask_only.json",

    "run_name": "inria_dataset_osm_mask_only.unet16",

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet16.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 16 // Batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.ictnet.leaderboard.json
ADDED
@@ -0,0 +1,38 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.ictnet.leaderboard",

    "seg_params": {
        "compute_interior": true,
        "compute_edge": false,
        "compute_vertex": false
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.ictnet.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "loss_params": {
        "seg_loss_params": {
            "bce_coef": 1.0,
            "dice_coef": 0.2,
            "use_dist": true, // Dist weights as in the original U-Net paper
            "use_size": false // Size weights increasing importance of smaller buildings
        }
    },

    "optim_params": {
        "batch_size": 2 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.ictnet.rmsprop.leaderboard.field_off.json
ADDED
@@ -0,0 +1,39 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.ictnet.rmsprop.leaderboard.field_off",

    "compute_crossfield": false,

    "seg_params": {
        "compute_interior": true,
        "compute_edge": false,
        "compute_vertex": false
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.ictnet.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "loss_params": {
        "seg_loss_params": {
            "bce_coef": 1.0,
            "dice_coef": 0.2,
            "use_dist": true, // Dist weights as in the original U-Net paper
            "use_size": false // Size weights increasing importance of smaller buildings
        }
    },

    "optim_params": {
        "optimizer": "RMSProp",
        "batch_size": 2 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.ictnet.rmsprop.leaderboard.json
ADDED
@@ -0,0 +1,39 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.ictnet.rmsprop.leaderboard",

    "seg_params": {
        "compute_interior": true,
        "compute_edge": false,
        "compute_vertex": false
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.ictnet.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "loss_params": {
        "seg_loss_params": {
            "bce_coef": 1.0,
            "dice_coef": 0.2,
            "use_dist": true, // Dist weights as in the original U-Net paper
            "use_size": false // Size weights increasing importance of smaller buildings
        }
    },

    "optim_params": {
        "optimizer": "RMSProp",
        "batch_size": 2 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.unet_resnet101.leaderboard.json
ADDED
@@ -0,0 +1,33 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.unet_resnet101.leaderboard",

    "seg_params": {
        "compute_interior": true,
        "compute_edge": false,
        "compute_vertex": false
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": false
    },

    "loss_params": {
        "seg_loss_params": {
            "bce_coef": 1.0,
            "dice_coef": 0.2,
            "use_dist": true, // Dist weights as in the original U-Net paper
            "use_size": false // Size weights increasing importance of smaller buildings
        }
    },

    "optim_params": {
        "batch_size": 4 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.field_off.json
ADDED
@@ -0,0 +1,18 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.unet_resnet101_pretrained.field_off",

    "compute_crossfield": false,

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "optim_params": {
        "batch_size": 10 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.json
ADDED
@@ -0,0 +1,18 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.unet_resnet101_pretrained",

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "optim_params": {
        "batch_size": 10 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.leaderboard.field_off.json
ADDED
@@ -0,0 +1,35 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.unet_resnet101_pretrained.leaderboard.field_off",

    "compute_crossfield": false,

    "seg_params": {
        "compute_interior": true,
        "compute_edge": false,
        "compute_vertex": false
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "loss_params": {
        "seg_loss_params": {
            "bce_coef": 1.0,
            "dice_coef": 0.2,
            "use_dist": true, // Dist weights as in the original U-Net paper
            "use_size": false // Size weights increasing importance of smaller buildings
        }
    },

    "optim_params": {
        "batch_size": 4 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized.unet_resnet101_pretrained.leaderboard.json
ADDED
@@ -0,0 +1,35 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized.unet_resnet101_pretrained.leaderboard",

    "seg_params": {
        "compute_interior": true,
        "compute_edge": false,
        "compute_vertex": false
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "loss_params": {
        "seg_loss_params": {
            "bce_coef": 1.0,
            "dice_coef": 0.2,
            "use_dist": true, // Dist weights as in the original U-Net paper
            "use_size": false // Size weights increasing importance of smaller buildings
        }
    },

    "optim_params": {
        "batch_size": 4 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.inria_dataset_polygonized_small.unet_resnet101_pretrained.json
ADDED
@@ -0,0 +1,19 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized_small.unet_resnet101_pretrained",

    "dataset_params": {
        "small": true
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "optim_params": {
        "batch_size": 10, // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
        "gamma": 1.0
    }
}
configs/config.inria_dataset_polygonized_small.unet_resnet101_pretrained.no_aug.json
ADDED
@@ -0,0 +1,23 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset_polygonized.json",

    "run_name": "inria_dataset_polygonized_small.unet_resnet101_pretrained.no_aug",

    "dataset_params": {
        "small": true
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet_resnet101.json", // Path from the project's root to a JSON with default values for backbone_params
        "pretrained": true
    },

    "optim_params": {
        "gamma": 1.0,
        "log_steps": 10
    },

    "data_aug_params": {
        "enable": false
    }
}
configs/config.inria_dataset_small.unet16.json
ADDED
@@ -0,0 +1,19 @@
{
    "defaults_filepath": "configs/config.defaults.inria_dataset.json",

    "run_name": "inria_dataset_small.unet16",

    "dataset_params": {
        "small": true
    },

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet16.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 12, // Batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
        "gamma": 0.99
    }
}
configs/config.luxcarta_dataset.unet16.field_off.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.luxcarta_dataset.json",

    "run_name": "luxcarta_dataset.unet16.field_off",

    "compute_crossfield": false,

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet16.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 16 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.luxcarta_dataset.unet16.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.luxcarta_dataset.json",

    "run_name": "luxcarta_dataset.unet16",

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.unet16.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 16 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.mapping_dataset.asip.json
ADDED
@@ -0,0 +1,5 @@
{
    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",

    "run_name": "mapping_dataset.asip"
}
configs/config.mapping_dataset.deeplab101.field_off.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",

    "run_name": "mapping_dataset.deeplab101.field_off",

    "compute_crossfield": false,

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.deeplab101.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 8 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.mapping_dataset.deeplab101.field_off.train_val.json
ADDED
@@ -0,0 +1,16 @@
{
    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",

    "run_name": "mapping_dataset.deeplab101.field_off.train_val",
    "fold": ["train", "val"],

    "compute_crossfield": false,

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.deeplab101.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 8 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.mapping_dataset.deeplab101.json
ADDED
@@ -0,0 +1,16 @@
{
    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",

    "run_name": "mapping_dataset.deeplab101",

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.deeplab101.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 8 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.mapping_dataset.deeplab101.train_val.json
ADDED
@@ -0,0 +1,15 @@
{
    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",

    "run_name": "mapping_dataset.deeplab101.train_val",
    "fold": ["train", "val"],

    "backbone_params": {
        "defaults_filepath": "configs/backbone_params.deeplab101.json" // Path from the project's root to a JSON with default values for backbone_params
    },

    "optim_params": {
        "batch_size": 8 // Overwrite default batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
    }
}
configs/config.mapping_dataset.fcn101.json
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"run_name": "mapping_dataset.fcn50",
|
+
+    "data_dir_candidates": [
+        "/local/shared/data", // Try cluster-local node first
+        "/data/titane/user/nigirard/data", // Try cluster /data directory
+        "~/data", // In home directory (docker)
+        "/data" // In landsat's /data volume (docker)
+    ],
+    "data_root_partial_dirpath": "mapping_challenge_dataset",
+    "dataset_params": {
+        "small": false
+    },
+    "num_workers": 10,
+    "data_split_params": {
+        "seed": 0, // Change this to change the random train/val/test split
+        "train_fraction": 0.75,
+        "val_fraction": 0.25 // test_fraction is the rest
+    },
+    "data_aug_params": {
+        "enable": true,
+        "vflip": true,
+        "rotate": true,
+        "color_jitter": true,
+        "device": "cuda"
+    },
+
+    "device": "cuda", // Only takes effect when mode is val or test; training always uses CUDA
+    "use_amp": true, // Automatic Mixed Precision switch
+
+    "backbone_params": {
+        "name": "fcn101",
+        "input_features": 3,
+        "features": 256,
+        "pretrained": false
+    },
+
+    "compute_seg": true,
+    "compute_crossfield": true,
+
+    "seg_params": {
+        "compute_interior": true,
+        "compute_edge": true,
+        "compute_vertex": false
+    },
+
+    "loss_params": {
+        "multiloss": {
+            "normalization_params": {
+                "min_samples": 10, // Per GPU
+                "max_samples": 1000 // Per GPU
+            },
+            "coefs": {
+                "seg_interior": 1,
+                "seg_edge": 1,
+                "seg_vertex": 0,
+                "crossfield_align": 1,
+                "crossfield_align90": 0.2,
+                "crossfield_smooth": 0.2,
+                "seg_interior_crossfield": 0.2,
+                "seg_edge_crossfield": 0.2,
+                "seg_edge_interior": 0.2
+            }
+        },
+        "seg_loss_params": { // https://github.com/neptune-ai/open-solution-mapping-challenge/blob/master/neptune.yaml
+            "bce_coef": 1.0,
+            "dice_coef": 0.2,
+            "w0": 50, // From the original U-Net paper: amplitude of the distance weight that increases the loss between close objects
+            "sigma": 10 // From the original U-Net paper: decay (in pixels) of that distance weight
+        }
+    },
+
+    "batch_size": 12, // Batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
+    "base_lr": 1e-4, // Will be multiplied by the effective_batch_size=world_size*batch_size.
+    "max_lr": 1e-2, // Maximum learning rate
+    "warmup_epochs": 1, // Number of epochs for warmup (learning rate starts at lr*warmup_factor and gradually increases to lr)
+    "warmup_factor": 1e-3,
+    "weight_decay": 0,
+    "dropout_keep_prob": 1.0, // Not used for now
+    "max_epoch": 25,
+    "log_steps": 50,
+    "checkpoint_epoch": 1,
+    "checkpoints_to_keep": 10, // outputs
+    "logs_dirname": "logs",
+    "save_input_output": false,
+    "log_input_output": false,
+    "checkpoints_dirname": "checkpoints",
+    "eval_dirname": "eval"
+}
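The optimizer comments above describe two interacting pieces: base_lr is scaled by the effective batch size (and capped at max_lr), and during the first warmup_epochs the learning rate ramps linearly from lr * warmup_factor up to lr. A small sketch of those rules as stated; the function names are mine and the project's actual scheduler may differ in detail:

def scaled_lr(base_lr: float, max_lr: float, world_size: int, batch_size: int) -> float:
    # Linear LR scaling: base_lr * effective_batch_size, clipped at max_lr.
    return min(base_lr * world_size * batch_size, max_lr)


def warmup_lr(lr: float, epoch_progress: float, warmup_epochs: float, warmup_factor: float) -> float:
    # Linearly interpolate from lr * warmup_factor to lr over the first warmup_epochs.
    if epoch_progress >= warmup_epochs:
        return lr
    alpha = epoch_progress / warmup_epochs             # 0 -> 1 during warmup
    return lr * (warmup_factor * (1 - alpha) + alpha)  # warmup_factor -> 1


# With the values above on a single 4-GPU node: 1e-4 * 4 * 12 = 4.8e-3 (below max_lr).
lr = scaled_lr(1e-4, 1e-2, world_size=4, batch_size=12)
print(lr, warmup_lr(lr, epoch_progress=0.0, warmup_epochs=1, warmup_factor=1e-3))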
configs/config.mapping_dataset.fcn50.json
ADDED
@@ -0,0 +1,89 @@
+{
+    "run_name": "mapping_dataset.fcn50",
+
+    "data_dir_candidates": [
+        "/local/shared/data", // Try cluster-local node first
+        "/data/titane/user/nigirard/data", // Try cluster /data directory
+        "~/data", // In home directory (docker)
+        "/data" // In landsat's /data volume (docker)
+    ],
+    "data_root_partial_dirpath": "mapping_challenge_dataset",
+    "dataset_params": {
+        "small": false
+    },
+    "num_workers": 10,
+    "data_split_params": {
+        "seed": 0, // Change this to change the random train/val/test split
+        "train_fraction": 0.75,
+        "val_fraction": 0.25 // test_fraction is the rest
+    },
+    "data_aug_params": {
+        "enable": true,
+        "vflip": true,
+        "rotate": true,
+        "color_jitter": true,
+        "device": "cuda"
+    },
+
+    "device": "cuda", // Only takes effect when mode is val or test; training always uses CUDA
+    "use_amp": true, // Automatic Mixed Precision switch
+
+    "backbone_params": {
+        "name": "fcn50",
+        "input_features": 3,
+        "features": 256,
+        "pretrained": false
+    },
+
+    "compute_seg": true,
+    "compute_crossfield": true,
+
+    "seg_params": {
+        "compute_interior": true,
+        "compute_edge": true,
+        "compute_vertex": false
+    },
+
+    "loss_params": {
+        "multiloss": {
+            "normalization_params": {
+                "min_samples": 10, // Per GPU
+                "max_samples": 1000 // Per GPU
+            },
+            "coefs": {
+                "seg_interior": 1,
+                "seg_edge": 1,
+                "seg_vertex": 0,
+                "crossfield_align": 1,
+                "crossfield_align90": 0.2,
+                "crossfield_smooth": 0.2,
+                "seg_interior_crossfield": 0.2,
+                "seg_edge_crossfield": 0.2,
+                "seg_edge_interior": 0.2
+            }
+        },
+        "seg_loss_params": { // https://github.com/neptune-ai/open-solution-mapping-challenge/blob/master/neptune.yaml
+            "bce_coef": 1.0,
+            "dice_coef": 0.2,
+            "w0": 50, // From the original U-Net paper: amplitude of the distance weight that increases the loss between close objects
+            "sigma": 10 // From the original U-Net paper: decay (in pixels) of that distance weight
+        }
+    },
+
+    "batch_size": 16, // Batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
+    "base_lr": 1e-4, // Will be multiplied by the effective_batch_size=world_size*batch_size.
+    "max_lr": 1e-2, // Maximum learning rate
+    "warmup_epochs": 1, // Number of epochs for warmup (learning rate starts at lr*warmup_factor and gradually increases to lr)
+    "warmup_factor": 1e-3,
+    "weight_decay": 0,
+    "dropout_keep_prob": 1.0, // Not used for now
+    "max_epoch": 25,
+    "log_steps": 50,
+    "checkpoint_epoch": 1,
+    "checkpoints_to_keep": 10, // outputs
+    "logs_dirname": "logs",
+    "save_input_output": false,
+    "log_input_output": false,
+    "checkpoints_dirname": "checkpoints",
+    "eval_dirname": "eval"
+}
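The w0 and sigma values in seg_loss_params refer to the distance-based weight map of the original U-Net paper (Ronneberger et al., 2015), which boosts the loss on pixels squeezed between two nearby objects: w(x) = w0 * exp(-(d1(x) + d2(x))^2 / (2 * sigma^2)), where d1 and d2 are the distances to the nearest and second-nearest object. The sketch below computes that term with NumPy/SciPy as an illustration only; the class-balancing part of the full U-Net weight map is omitted, and this is not necessarily how this repository implements it (defaults match the config values above).

import numpy as np
from scipy import ndimage


def unet_distance_weights(instance_labels: np.ndarray, w0: float = 50.0, sigma: float = 10.0) -> np.ndarray:
    # instance_labels: int array, 0 = background, 1..N = object instances.
    instance_ids = [i for i in np.unique(instance_labels) if i != 0]
    if len(instance_ids) < 2:
        return np.zeros(instance_labels.shape, dtype=np.float64)
    # Distance from every pixel to each object (distance transform of the object's complement).
    distances = np.stack([
        ndimage.distance_transform_edt(instance_labels != i) for i in instance_ids
    ])
    distances.sort(axis=0)
    d1, d2 = distances[0], distances[1]  # nearest and second-nearest object
    return w0 * np.exp(-((d1 + d2) ** 2) / (2 * sigma ** 2))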
configs/config.mapping_dataset.open_solution.json
ADDED
@@ -0,0 +1,5 @@
+{
+    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",
+
+    "run_name": "mapping_dataset.open_solution"
+}
configs/config.mapping_dataset.open_solution_full.json
ADDED
@@ -0,0 +1,5 @@
+{
+    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",
+
+    "run_name": "mapping_dataset.open_solution_full"
+}
configs/config.mapping_dataset.polymapper.json
ADDED
@@ -0,0 +1,5 @@
+{
+    "defaults_filepath": "configs/config.defaults.mapping_dataset.json",
+
+    "run_name": "mapping_dataset.polymapper"
+}
configs/config.mapping_dataset.unet16.coupling_losses_0.4.json
ADDED
@@ -0,0 +1,88 @@
+{
+    "run_name": "mapping_dataset.unet16.coupling_losses_0.4",
+
+    "data_dir_candidates": [
+        "/local/shared/data", // Try cluster-local node first
+        "/data/titane/user/nigirard/data", // Try cluster /data directory
+        "~/data", // In home directory (docker)
+        "/data" // In landsat's /data volume (docker)
+    ],
+    "data_root_partial_dirpath": "mapping_challenge_dataset",
+    "dataset_params": {
+        "small": false
+    },
+    "num_workers": 10,
+    "data_split_params": {
+        "seed": 0, // Change this to change the random train/val/test split
+        "train_fraction": 0.75,
+        "val_fraction": 0.25 // test_fraction is the rest
+    },
+    "data_aug_params": {
+        "enable": true,
+        "vflip": true,
+        "rotate": true,
+        "color_jitter": true,
+        "device": "cuda"
+    },
+
+    "device": "cuda", // Only takes effect when mode is val or test; training always uses CUDA
+    "use_amp": true, // Automatic Mixed Precision switch
+
+    "backbone_params": {
+        "name": "unet",
+        "input_features": 3,
+        "features": 16
+    },
+
+    "compute_seg": true,
+    "compute_crossfield": true,
+
+    "seg_params": {
+        "compute_interior": true,
+        "compute_edge": true,
+        "compute_vertex": false
+    },
+
+    "loss_params": {
+        "multiloss": {
+            "normalization_params": {
+                "min_samples": 10, // Per GPU
+                "max_samples": 1000 // Per GPU
+            },
+            "coefs": {
+                "seg_interior": 1,
+                "seg_edge": 1,
+                "seg_vertex": 0,
+                "crossfield_align": 1,
+                "crossfield_align90": 0.4,
+                "crossfield_smooth": 0.4,
+                "seg_interior_crossfield": 0.4,
+                "seg_edge_crossfield": 0.4,
+                "seg_edge_interior": 0.4
+            }
+        },
+        "seg_loss_params": { // https://github.com/neptune-ai/open-solution-mapping-challenge/blob/master/neptune.yaml
+            "bce_coef": 1.0,
+            "dice_coef": 0.2,
+            "w0": 50, // From the original U-Net paper: amplitude of the distance weight that increases the loss between close objects
+            "sigma": 10 // From the original U-Net paper: decay (in pixels) of that distance weight
+        }
+    },
+
+    "batch_size": 32, // Batch size per GPU. The effective batch size is effective_batch_size=world_size*batch_size
+    "base_lr": 1e-4, // Will be multiplied by the effective_batch_size=world_size*batch_size.
+    "max_lr": 1e-2, // Maximum learning rate
+    "warmup_epochs": 1, // Number of epochs for warmup (learning rate starts at lr*warmup_factor and gradually increases to lr)
+    "warmup_factor": 1e-3,
+    "weight_decay": 0,
+    "dropout_keep_prob": 1.0, // Not used for now
+    "max_epoch": 25,
+    "log_steps": 50,
+    "checkpoint_epoch": 1,
+    "checkpoints_to_keep": 10, // outputs
+    "logs_dirname": "logs",
+    "save_input_output": false,
+    "log_input_output": false,
+    "checkpoints_dirname": "checkpoints",
+    "eval_dirname": "eval"
+}
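The shared data_split_params block in these mapping-dataset configs describes a seeded random split: train_fraction and val_fraction of the samples go to train and val, and whatever remains becomes the test fold (empty here, since 0.75 + 0.25 = 1). A stand-in illustration of that behaviour, not the repository's actual splitting code:

import random
from typing import Sequence


def split_samples(samples: Sequence, seed: int, train_fraction: float, val_fraction: float):
    # Shuffle deterministically, then cut into train/val/test by the given fractions.
    shuffled = list(samples)
    random.Random(seed).shuffle(shuffled)
    n_train = int(round(train_fraction * len(shuffled)))
    n_val = int(round(val_fraction * len(shuffled)))
    train = shuffled[:n_train]
    val = shuffled[n_train:n_train + n_val]
    test = shuffled[n_train + n_val:]  # test_fraction is the rest
    return train, val, test


# seed=0, train_fraction=0.75, val_fraction=0.25 -> 75 / 25 / 0 samples out of 100.
train, val, test = split_samples(range(100), seed=0, train_fraction=0.75, val_fraction=0.25)
print(len(train), len(val), len(test))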