VMamba / ade20k_upernet_vmamba_small_160k_640_iter160000_508.log
2024/01/18 13:41:34 - mmengine - INFO -
------------------------------------------------------------
System environment:
sys.platform: linux
Python: 3.10.13 (main, Sep 11 2023, 13:44:35) [GCC 11.2.0]
CUDA available: True
numpy_random_seed: 1142582054
GPU 0,1,2,3,4,5,6: NVIDIA A100-SXM4-80GB
CUDA_HOME: /usr/local/cuda-11.7
NVCC: Cuda compilation tools, release 11.7, V11.7.64
GCC: gcc (Ubuntu 9.4.0-1ubuntu1~20.04.3) 9.4.0
PyTorch: 1.13.0
PyTorch compiling details: PyTorch built with:
- GCC 9.3
- C++ Version: 201402
- Intel(R) oneAPI Math Kernel Library Version 2023.1-Product Build 20230303 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.7
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37
- CuDNN 8.5
- Magma 2.6.1
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.7, CUDNN_VERSION=8.5.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,
TorchVision: 0.14.0
OpenCV: 4.8.1
MMEngine: 0.10.1
Runtime environment:
cudnn_benchmark: True
mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}
dist_cfg: {'backend': 'nccl'}
seed: 1142582054
Distributed launcher: pytorch
Distributed training: True
GPU number: 4
------------------------------------------------------------
2024/01/18 13:41:36 - mmengine - INFO - Config:
backbone_norm_cfg = dict(requires_grad=True, type='LN')
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth'
crop_size = (
640,
640,
)
data_preprocessor = dict(
bgr_to_rgb=True,
mean=[
123.675,
116.28,
103.53,
],
pad_val=0,
seg_pad_val=255,
size=(
640,
640,
),
std=[
58.395,
57.12,
57.375,
],
type='SegDataPreProcessor')
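As a reading aid, here is a minimal NumPy sketch of the arithmetic these SegDataPreProcessor settings imply (BGR-to-RGB flip, per-channel normalization with the mean/std above, padding to 640x640 with pad_val). The helper below is an illustrative assumption, not the mmseg implementation.

import numpy as np

def preprocess(img_bgr: np.ndarray,
               mean=(123.675, 116.28, 103.53),
               std=(58.395, 57.12, 57.375),
               size=(640, 640), pad_val=0.0):
    # Illustrative stand-in for SegDataPreProcessor (assumption, not mmseg code).
    img = img_bgr[..., ::-1].astype(np.float32)         # bgr_to_rgb=True
    img = (img - np.array(mean)) / np.array(std)        # per-channel normalization
    h = min(img.shape[0], size[0])
    w = min(img.shape[1], size[1])
    padded = np.full((size[0], size[1], 3), float(pad_val), dtype=np.float32)
    padded[:h, :w] = img[:h, :w]                        # pad (or clip) to 640x640
    return padded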
data_root = 'data/ade/ADEChallengeData2016'
dataset_type = 'ADE20KDataset'
default_hooks = dict(
checkpoint=dict(by_epoch=False, interval=16000, type='CheckpointHook'),
logger=dict(interval=50, log_metric_by_epoch=False, type='LoggerHook'),
param_scheduler=dict(type='ParamSchedulerHook'),
sampler_seed=dict(type='DistSamplerSeedHook'),
timer=dict(type='IterTimerHook'),
visualization=dict(type='SegVisualizationHook'))
default_scope = 'mmseg'
env_cfg = dict(
cudnn_benchmark=True,
dist_cfg=dict(backend='nccl'),
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
img_ratios = [
0.5,
0.75,
1.0,
1.25,
1.5,
1.75,
]
launcher = 'pytorch'
load_from = '/home/LiuYue/Workspace3/ckpts/segmentation/work_dirs/upernet_vssm_4xb4-160k_ade20k-640x640_small/iter_160000.pth'
log_level = 'INFO'
log_processor = dict(by_epoch=False)
model = dict(
module=dict(
auxiliary_head=dict(
align_corners=False,
channels=256,
concat_input=False,
dropout_ratio=0.1,
in_channels=384,
in_index=2,
loss_decode=dict(
loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=False),
norm_cfg=dict(requires_grad=True, type='SyncBN'),
num_classes=150,
num_convs=1,
type='FCNHead'),
backbone=dict(
act_cfg=dict(type='GELU'),
attn_drop_rate=0.0,
depths=(
2,
2,
27,
2,
),
dims=96,
drop_path_rate=0.3,
drop_rate=0.0,
embed_dims=96,
init_cfg=dict(
checkpoint=
'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth',
type='Pretrained'),
mlp_ratio=4,
norm_cfg=dict(requires_grad=True, type='LN'),
num_heads=[
3,
6,
12,
24,
],
out_indices=(
0,
1,
2,
3,
),
patch_norm=True,
patch_size=4,
pretrain_img_size=224,
pretrained='../../ckpts/vssmsmall/ckpt_epoch_238.pth',
qk_scale=None,
qkv_bias=True,
strides=(
4,
2,
2,
2,
),
type='MMSEG_VSSM',
use_abs_pos_embed=False,
window_size=7),
data_preprocessor=dict(
bgr_to_rgb=True,
mean=[
123.675,
116.28,
103.53,
],
pad_val=0,
seg_pad_val=255,
size=(
640,
640,
),
std=[
58.395,
57.12,
57.375,
],
type='SegDataPreProcessor'),
decode_head=dict(
align_corners=False,
channels=512,
dropout_ratio=0.1,
in_channels=[
96,
192,
384,
768,
],
in_index=[
0,
1,
2,
3,
],
loss_decode=dict(
loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False),
norm_cfg=dict(requires_grad=True, type='SyncBN'),
num_classes=150,
pool_scales=(
1,
2,
3,
6,
),
type='UPerHead'),
pretrained=None,
test_cfg=dict(mode='whole'),
train_cfg=dict(),
type='EncoderDecoder'),
type='SegTTAModel')
norm_cfg = dict(requires_grad=True, type='SyncBN')
optim_wrapper = dict(
optimizer=dict(
betas=(
0.9,
0.999,
), lr=6e-05, type='AdamW', weight_decay=0.01),
paramwise_cfg=dict(
custom_keys=dict(
absolute_pos_embed=dict(decay_mult=0.0),
norm=dict(decay_mult=0.0),
relative_position_bias_table=dict(decay_mult=0.0))),
type='OptimWrapper')
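The paramwise_cfg above turns off weight decay for norm layers and the Swin-style position-embedding tables by substring match on parameter names. A hedged sketch of that grouping idea in plain PyTorch follows; build_param_groups is a hypothetical helper, not the mmengine OptimWrapper constructor.

import torch

def build_param_groups(model, base_wd=0.01,
                       no_decay_keys=('norm', 'absolute_pos_embed',
                                      'relative_position_bias_table')):
    # Put parameters whose names contain any no-decay key into a zero-decay group.
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        (no_decay if any(k in name for k in no_decay_keys) else decay).append(param)
    return [{'params': decay, 'weight_decay': base_wd},
            {'params': no_decay, 'weight_decay': 0.0}]

# e.g. torch.optim.AdamW(build_param_groups(model), lr=6e-05, betas=(0.9, 0.999))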
optimizer = dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005)
param_scheduler = [
dict(
begin=0, by_epoch=False, end=1500, start_factor=1e-06,
type='LinearLR'),
dict(
begin=1500,
by_epoch=False,
end=160000,
eta_min=0.0,
power=1.0,
type='PolyLR'),
]
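These two schedulers chain a 1500-iteration linear warmup (starting from base_lr * 1e-06) with a polynomial decay (power=1.0, i.e. linear) down to eta_min=0.0 at 160k iterations. A small sketch of the implied learning-rate curve, assuming the standard LinearLR/PolyLR formulas:

def lr_at(iter_idx, base_lr=6e-05, warmup_end=1500, max_iters=160000,
          start_factor=1e-06, power=1.0, eta_min=0.0):
    # Approximate LR implied by LinearLR warmup followed by PolyLR decay (assumed formulas).
    if iter_idx < warmup_end:
        factor = start_factor + (1.0 - start_factor) * iter_idx / warmup_end
        return base_lr * factor
    progress = (iter_idx - warmup_end) / (max_iters - warmup_end)
    return (base_lr - eta_min) * (1.0 - progress) ** power + eta_min

# e.g. lr_at(0) ~ 6e-11, lr_at(1500) = 6e-05, lr_at(160000) = 0.0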
resume = False
test_cfg = dict(type='TestLoop')
test_dataloader = dict(
batch_size=1,
dataset=dict(
data_prefix=dict(
img_path='images/validation',
seg_map_path='annotations/validation'),
data_root='data/ade/ADEChallengeData2016',
pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(
transforms=[
[
dict(keep_ratio=True, scale_factor=0.5, type='Resize'),
dict(
keep_ratio=True, scale_factor=0.75, type='Resize'),
dict(keep_ratio=True, scale_factor=1.0, type='Resize'),
dict(
keep_ratio=True, scale_factor=1.25, type='Resize'),
dict(keep_ratio=True, scale_factor=1.5, type='Resize'),
dict(
keep_ratio=True, scale_factor=1.75, type='Resize'),
],
[
dict(
direction='horizontal',
prob=0.0,
type='RandomFlip'),
dict(
direction='horizontal',
prob=1.0,
type='RandomFlip'),
],
[
dict(type='LoadAnnotations'),
],
[
dict(type='PackSegInputs'),
],
],
type='TestTimeAug'),
],
type='ADE20KDataset'),
num_workers=4,
persistent_workers=True,
sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(
iou_metrics=[
'mIoU',
], type='IoUMetric')
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(keep_ratio=True, scale=(
2560,
640,
), type='Resize'),
dict(reduce_zero_label=True, type='LoadAnnotations'),
dict(type='PackSegInputs'),
]
train_cfg = dict(
max_iters=160000, type='IterBasedTrainLoop', val_interval=16000)
train_dataloader = dict(
batch_size=2,
dataset=dict(
data_prefix=dict(
img_path='images/training', seg_map_path='annotations/training'),
data_root='data/ade/ADEChallengeData2016',
pipeline=[
dict(type='LoadImageFromFile'),
dict(reduce_zero_label=True, type='LoadAnnotations'),
dict(
keep_ratio=True,
ratio_range=(
0.5,
2.0,
),
scale=(
2560,
640,
),
type='RandomResize'),
dict(
cat_max_ratio=0.75, crop_size=(
640,
640,
), type='RandomCrop'),
dict(prob=0.5, type='RandomFlip'),
dict(type='PhotoMetricDistortion'),
dict(type='PackSegInputs'),
],
type='ADE20KDataset'),
num_workers=4,
persistent_workers=True,
sampler=dict(shuffle=True, type='InfiniteSampler'))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(reduce_zero_label=True, type='LoadAnnotations'),
dict(
keep_ratio=True,
ratio_range=(
0.5,
2.0,
),
scale=(
2560,
640,
),
type='RandomResize'),
dict(cat_max_ratio=0.75, crop_size=(
640,
640,
), type='RandomCrop'),
dict(prob=0.5, type='RandomFlip'),
dict(type='PhotoMetricDistortion'),
dict(type='PackSegInputs'),
]
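In this pipeline, RandomResize samples a ratio in (0.5, 2.0), scales the (2560, 640) target by it, and then rescales the image to fit inside that bound because keep_ratio=True; RandomCrop then takes a 640x640 patch, rejecting crops where a single class covers more than 75% of the pixels. A hedged sketch of the resize step only, assuming mmcv's rescale semantics (the function name is illustrative):

import random

def sample_train_scale(img_h, img_w, scale=(2560, 640), ratio_range=(0.5, 2.0)):
    # Sample a ratio, scale the (long, short) bound, and fit the image inside it.
    ratio = random.uniform(*ratio_range)
    max_long, max_short = scale[0] * ratio, scale[1] * ratio
    long_edge, short_edge = max(img_h, img_w), min(img_h, img_w)
    factor = min(max_long / long_edge, max_short / short_edge)
    return round(img_h * factor), round(img_w * factor)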
tta_model = dict(
module=dict(
auxiliary_head=dict(
align_corners=False,
channels=256,
concat_input=False,
dropout_ratio=0.1,
in_channels=384,
in_index=2,
loss_decode=dict(
loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=False),
norm_cfg=dict(requires_grad=True, type='SyncBN'),
num_classes=150,
num_convs=1,
type='FCNHead'),
backbone=dict(
act_cfg=dict(type='GELU'),
attn_drop_rate=0.0,
depths=(
2,
2,
27,
2,
),
dims=96,
drop_path_rate=0.3,
drop_rate=0.0,
embed_dims=96,
init_cfg=dict(
checkpoint=
'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth',
type='Pretrained'),
mlp_ratio=4,
norm_cfg=dict(requires_grad=True, type='LN'),
num_heads=[
3,
6,
12,
24,
],
out_indices=(
0,
1,
2,
3,
),
patch_norm=True,
patch_size=4,
pretrain_img_size=224,
pretrained='../../ckpts/vssmsmall/ckpt_epoch_238.pth',
qk_scale=None,
qkv_bias=True,
strides=(
4,
2,
2,
2,
),
type='MMSEG_VSSM',
use_abs_pos_embed=False,
window_size=7),
data_preprocessor=dict(
bgr_to_rgb=True,
mean=[
123.675,
116.28,
103.53,
],
pad_val=0,
seg_pad_val=255,
size=(
640,
640,
),
std=[
58.395,
57.12,
57.375,
],
type='SegDataPreProcessor'),
decode_head=dict(
align_corners=False,
channels=512,
dropout_ratio=0.1,
in_channels=[
96,
192,
384,
768,
],
in_index=[
0,
1,
2,
3,
],
loss_decode=dict(
loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False),
norm_cfg=dict(requires_grad=True, type='SyncBN'),
num_classes=150,
pool_scales=(
1,
2,
3,
6,
),
type='UPerHead'),
pretrained=None,
test_cfg=dict(mode='whole'),
train_cfg=dict(),
type='EncoderDecoder'),
type='SegTTAModel')
tta_pipeline = [
dict(backend_args=None, type='LoadImageFromFile'),
dict(
transforms=[
[
dict(keep_ratio=True, scale_factor=0.5, type='Resize'),
dict(keep_ratio=True, scale_factor=0.75, type='Resize'),
dict(keep_ratio=True, scale_factor=1.0, type='Resize'),
dict(keep_ratio=True, scale_factor=1.25, type='Resize'),
dict(keep_ratio=True, scale_factor=1.5, type='Resize'),
dict(keep_ratio=True, scale_factor=1.75, type='Resize'),
],
[
dict(direction='horizontal', prob=0.0, type='RandomFlip'),
dict(direction='horizontal', prob=1.0, type='RandomFlip'),
],
[
dict(type='LoadAnnotations'),
],
[
dict(type='PackSegInputs'),
],
],
type='TestTimeAug'),
]
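The TestTimeAug block enumerates 6 scales x 2 flips = 12 views per image, and SegTTAModel merges their predictions. A minimal standalone sketch of that merge idea, averaging per-class logits after undoing the horizontal flip (illustrative only, not the mmseg implementation; it assumes every view has already been resized back to a common resolution):

import numpy as np

def merge_tta_logits(logits_list, flipped_flags):
    # logits_list: per-view arrays of shape (num_classes, H, W); flipped_flags: bools.
    merged = np.zeros_like(logits_list[0], dtype=np.float64)
    for logits, flipped in zip(logits_list, flipped_flags):
        if flipped:
            logits = logits[:, :, ::-1]   # undo the horizontal flip
        merged += logits
    merged /= len(logits_list)
    return merged.argmax(axis=0)          # per-pixel class prediction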
val_cfg = dict(type='ValLoop')
val_dataloader = dict(
batch_size=1,
dataset=dict(
data_prefix=dict(
img_path='images/validation',
seg_map_path='annotations/validation'),
data_root='data/ade/ADEChallengeData2016',
pipeline=[
dict(type='LoadImageFromFile'),
dict(keep_ratio=True, scale=(
2560,
640,
), type='Resize'),
dict(reduce_zero_label=True, type='LoadAnnotations'),
dict(type='PackSegInputs'),
],
type='ADE20KDataset'),
num_workers=4,
persistent_workers=True,
sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(
iou_metrics=[
'mIoU',
], type='IoUMetric')
vis_backends = [
dict(type='LocalVisBackend'),
]
visualizer = dict(
name='visualizer',
type='SegLocalVisualizer',
vis_backends=[
dict(type='LocalVisBackend'),
])
work_dir = './work_dirs/upernet_vssm_4xb4-160k_ade20k-640x640_small'
2024/01/18 13:41:39 - mmengine - INFO - Hooks will be executed in the following order:
before_run:
(VERY_HIGH ) RuntimeInfoHook
(BELOW_NORMAL) LoggerHook
--------------------
before_train:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(VERY_LOW ) CheckpointHook
--------------------
before_train_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(NORMAL ) DistSamplerSeedHook
--------------------
before_train_iter:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
--------------------
after_train_iter:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(NORMAL ) SegVisualizationHook
(BELOW_NORMAL) LoggerHook
(LOW ) ParamSchedulerHook
(VERY_LOW ) CheckpointHook
--------------------
after_train_epoch:
(NORMAL ) IterTimerHook
(LOW ) ParamSchedulerHook
(VERY_LOW ) CheckpointHook
--------------------
before_val:
(VERY_HIGH ) RuntimeInfoHook
--------------------
before_val_epoch:
(NORMAL ) IterTimerHook
--------------------
before_val_iter:
(NORMAL ) IterTimerHook
--------------------
after_val_iter:
(NORMAL ) IterTimerHook
(NORMAL ) SegVisualizationHook
(BELOW_NORMAL) LoggerHook
--------------------
after_val_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
(LOW ) ParamSchedulerHook
(VERY_LOW ) CheckpointHook
--------------------
after_val:
(VERY_HIGH ) RuntimeInfoHook
--------------------
after_train:
(VERY_HIGH ) RuntimeInfoHook
(VERY_LOW ) CheckpointHook
--------------------
before_test:
(VERY_HIGH ) RuntimeInfoHook
--------------------
before_test_epoch:
(NORMAL ) IterTimerHook
--------------------
before_test_iter:
(NORMAL ) IterTimerHook
--------------------
after_test_iter:
(NORMAL ) IterTimerHook
(NORMAL ) SegVisualizationHook
(BELOW_NORMAL) LoggerHook
--------------------
after_test_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
--------------------
after_test:
(VERY_HIGH ) RuntimeInfoHook
--------------------
after_run:
(BELOW_NORMAL) LoggerHook
--------------------
2024/01/18 13:41:41 - mmengine - WARNING - The prefix is not set in metric class IoUMetric.
2024/01/18 13:41:42 - mmengine - INFO - Load checkpoint from /home/LiuYue/Workspace3/ckpts/segmentation/work_dirs/upernet_vssm_4xb4-160k_ade20k-640x640_small/iter_160000.pth
2024/01/18 13:53:00 - mmengine - INFO - Iter(test) [ 50/500] eta: 1:41:38 time: 9.2342 data_time: 0.0153 memory: 53982
2024/01/18 14:00:40 - mmengine - INFO - Iter(test) [100/500] eta: 1:15:51 time: 3.6223 data_time: 0.0136 memory: 52867
2024/01/18 14:04:27 - mmengine - INFO - Iter(test) [150/500] eta: 0:53:03 time: 1.3106 data_time: 0.0160 memory: 52745
2024/01/18 14:11:51 - mmengine - INFO - Iter(test) [200/500] eta: 0:45:12 time: 3.2742 data_time: 0.0150 memory: 52971
2024/01/18 14:15:23 - mmengine - INFO - Iter(test) [250/500] eta: 0:33:40 time: 4.4249 data_time: 0.0168 memory: 53191
2024/01/18 14:20:45 - mmengine - INFO - Iter(test) [300/500] eta: 0:26:01 time: 6.0236 data_time: 0.0202 memory: 56580
2024/01/18 14:24:59 - mmengine - INFO - Iter(test) [350/500] eta: 0:18:32 time: 7.2593 data_time: 0.0146 memory: 52298
2024/01/18 14:28:39 - mmengine - INFO - Iter(test) [400/500] eta: 0:11:44 time: 2.0090 data_time: 0.0136 memory: 53112
2024/01/18 14:32:55 - mmengine - INFO - Iter(test) [450/500] eta: 0:05:41 time: 0.9588 data_time: 0.0158 memory: 52817
2024/01/18 14:36:26 - mmengine - INFO - Iter(test) [500/500] eta: 0:00:00 time: 7.8064 data_time: 0.0142 memory: 52995
2024/01/18 14:38:02 - mmengine - INFO - per class results:
2024/01/18 14:38:02 - mmengine - INFO -
+---------------------+-------+-------+
| Class | IoU | Acc |
+---------------------+-------+-------+
| wall | 78.75 | 89.36 |
| building | 83.12 | 92.71 |
| sky | 94.5 | 97.63 |
| floor | 81.76 | 90.23 |
| tree | 74.85 | 88.04 |
| ceiling | 85.58 | 92.92 |
| road | 85.53 | 91.16 |
| bed | 89.56 | 95.86 |
| windowpane | 64.66 | 81.12 |
| grass | 65.41 | 80.54 |
| cabinet | 61.71 | 73.16 |
| sidewalk | 69.77 | 82.53 |
| person | 80.78 | 92.72 |
| earth | 39.83 | 53.66 |
| door | 53.67 | 67.04 |
| table | 61.54 | 79.57 |
| mountain | 57.79 | 75.02 |
| plant | 52.7 | 63.35 |
| curtain | 74.79 | 86.97 |
| chair | 59.42 | 72.69 |
| car | 84.32 | 92.36 |
| water | 55.89 | 69.4 |
| painting | 74.79 | 87.5 |
| sofa | 68.36 | 84.71 |
| shelf | 44.36 | 63.6 |
| house | 46.15 | 61.18 |
| sea | 57.85 | 81.06 |
| mirror | 69.21 | 77.51 |
| rug | 61.87 | 73.64 |
| field | 29.81 | 47.44 |
| armchair | 46.69 | 64.08 |
| seat | 62.14 | 82.15 |
| fence | 47.03 | 64.8 |
| desk | 53.19 | 70.23 |
| rock | 46.6 | 70.86 |
| wardrobe | 46.65 | 66.04 |
| lamp | 66.87 | 78.03 |
| bathtub | 83.11 | 86.64 |
| railing | 35.37 | 49.1 |
| cushion | 60.08 | 72.91 |
| base | 28.85 | 42.24 |
| box | 26.91 | 33.36 |
| column | 46.47 | 58.22 |
| signboard | 38.24 | 51.08 |
| chest of drawers | 45.6 | 66.14 |
| counter | 25.59 | 34.04 |
| sand | 45.36 | 64.69 |
| sink | 73.4 | 81.15 |
| skyscraper | 49.52 | 60.23 |
| fireplace | 80.08 | 90.52 |
| refrigerator | 76.78 | 81.87 |
| grandstand | 46.64 | 79.47 |
| path | 25.75 | 36.79 |
| stairs | 34.91 | 44.92 |
| runway | 70.95 | 92.5 |
| case | 61.74 | 76.13 |
| pool table | 91.83 | 96.65 |
| pillow | 60.23 | 71.02 |
| screen door | 70.03 | 75.59 |
| stairway | 34.92 | 41.71 |
| river | 9.03 | 17.44 |
| bridge | 67.13 | 78.16 |
| bookcase | 44.09 | 68.9 |
| blind | 46.02 | 50.39 |
| coffee table | 59.14 | 82.97 |
| toilet | 85.59 | 90.78 |
| flower | 37.12 | 51.46 |
| book | 46.03 | 62.65 |
| hill | 12.8 | 20.47 |
| bench | 40.19 | 46.67 |
| countertop | 56.79 | 74.35 |
| stove | 78.19 | 85.06 |
| palm | 51.92 | 70.76 |
| kitchen island | 49.25 | 77.56 |
| computer | 76.69 | 89.25 |
| swivel chair | 46.97 | 64.54 |
| boat | 39.55 | 56.75 |
| bar | 40.71 | 53.86 |
| arcade machine | 85.72 | 94.08 |
| hovel | 33.09 | 39.0 |
| bus | 93.28 | 97.04 |
| towel | 66.95 | 78.09 |
| light | 57.36 | 64.37 |
| truck | 43.92 | 56.1 |
| tower | 17.34 | 27.06 |
| chandelier | 70.27 | 85.27 |
| awning | 25.15 | 30.83 |
| streetlight | 27.76 | 33.84 |
| booth | 34.47 | 38.09 |
| television receiver | 70.57 | 77.57 |
| airplane | 60.13 | 67.32 |
| dirt track | 1.29 | 2.65 |
| apparel | 30.46 | 48.93 |
| pole | 22.16 | 29.32 |
| land | 2.43 | 3.38 |
| bannister | 12.98 | 17.41 |
| escalator | 35.52 | 51.31 |
| ottoman | 49.75 | 64.2 |
| bottle | 36.52 | 57.03 |
| buffet | 45.18 | 59.91 |
| poster | 26.96 | 30.14 |
| stage | 15.07 | 19.98 |
| van | 40.79 | 58.46 |
| ship | 58.61 | 93.2 |
| fountain | 37.13 | 37.62 |
| conveyer belt | 73.14 | 91.11 |
| canopy | 16.41 | 21.48 |
| washer | 70.64 | 72.56 |
| plaything | 26.95 | 40.15 |
| swimming pool | 46.85 | 49.67 |
| stool | 43.95 | 55.94 |
| barrel | 43.46 | 68.28 |
| basket | 28.25 | 40.89 |
| waterfall | 52.45 | 64.56 |
| tent | 88.84 | 98.38 |
| bag | 16.38 | 20.77 |
| minibike | 74.94 | 87.17 |
| cradle | 76.09 | 97.44 |
| oven | 56.13 | 67.6 |
| ball | 48.07 | 61.55 |
| food | 47.43 | 54.79 |
| step | 11.71 | 13.34 |
| tank | 49.12 | 52.75 |
| trade name | 25.88 | 29.71 |
| microwave | 85.51 | 93.72 |
| pot | 45.76 | 52.34 |
| animal | 55.15 | 57.0 |
| bicycle | 57.35 | 80.39 |
| lake | 47.5 | 63.73 |
| dishwasher | 70.78 | 80.28 |
| screen | 66.71 | 81.93 |
| blanket | 11.9 | 13.84 |
| sculpture | 64.88 | 77.88 |
| hood | 58.27 | 69.63 |
| sconce | 49.96 | 61.09 |
| vase | 44.7 | 55.6 |
| traffic light | 37.16 | 53.95 |
| tray | 7.6 | 10.69 |
| ashcan | 42.42 | 56.09 |
| fan | 62.16 | 76.81 |
| pier | 47.99 | 56.02 |
| crt screen | 6.95 | 19.0 |
| plate | 53.63 | 67.7 |
| monitor | 4.75 | 5.08 |
| bulletin board | 54.16 | 62.52 |
| shower | 0.0 | 0.0 |
| radiator | 62.46 | 71.64 |
| glass | 13.45 | 14.01 |
| clock | 40.9 | 46.31 |
| flag | 50.72 | 53.6 |
+---------------------+-------+-------+
2024/01/18 14:38:02 - mmengine - INFO - Iter(test) [500/500] aAcc: 83.8800 mIoU: 50.7800 mAcc: 62.2700 data_time: 0.0226 time: 6.5675
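For reference, a compact sketch of how the reported aAcc / mIoU / mAcc relate to a per-class confusion matrix; the numbers above come from mmseg's IoUMetric, and the helper below is only an illustrative restatement of the standard definitions.

import numpy as np

def summarize(confusion: np.ndarray):
    # confusion[i, j] = number of pixels with ground-truth class i predicted as class j.
    tp = np.diag(confusion).astype(np.float64)
    gt = confusion.sum(axis=1)              # pixels per ground-truth class
    pred = confusion.sum(axis=0)            # pixels per predicted class
    iou = tp / np.maximum(gt + pred - tp, 1)
    acc = tp / np.maximum(gt, 1)
    return {'aAcc': tp.sum() / confusion.sum(),   # overall pixel accuracy
            'mIoU': float(np.mean(iou)),          # mean intersection-over-union
            'mAcc': float(np.mean(acc))}          # mean per-class accuracy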