deep-tempest/KAIR/denoising/drunet/train.log

23-03-24 18:59:44.942 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set12
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-24 18:59:45.373 : Number of train images: 7,740, iters: 121
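The block above is the option dump printed at startup; its 'train' section corresponds to ordinary PyTorch training objects. The sketch below shows that correspondence as a minimal, assumed mapping onto the stock torch.optim APIs (it is not a quote of the KAIR training code, and 'model' is a placeholder rather than the real netG construction). It also reproduces the "7,740 images, iters: 121" line: at batch size 64, ceil(7740 / 64) = 121 iterations per epoch.

# Minimal sketch of the optimizer/scheduler implied by the 'train' options above.
# Assumes standard PyTorch; 'model' is a stand-in for the DRUNet generator (netG).
import math
import torch

model = torch.nn.Conv2d(2, 1, 3, padding=1, bias=False)  # placeholder module only

# G_optimizer_type: adam, lr 1e-4, betas (0.9, 0.999), weight decay 0
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4,
                             betas=(0.9, 0.999), weight_decay=0)

# G_scheduler_type: MultiStepLR -- the learning rate is halved (gamma 0.5)
# at iterations 100k, 200k, 300k and 400k
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[100000, 200000, 300000, 400000], gamma=0.5)

loss_fn = torch.nn.L1Loss()  # G_lossfn_type: l1

iters_per_epoch = math.ceil(7740 / 64)  # = 121, matching the log line above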
23-03-24 19:00:47.180 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-24 19:00:47.412 : Number of train images: 7,740, iters: 121
23-03-24 19:18:43.927 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-24 19:18:44.225 : Number of train images: 7,740, iters: 121
23-03-24 19:30:32.473 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-24 19:30:32.703 : Number of train images: 7,740, iters: 121
23-03-24 19:32:09.714 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-24 19:32:09.949 : Number of train images: 7,740, iters: 121
23-03-24 22:06:29.069 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-24 22:06:33.445 : Number of train images: 7,740, iters: 121
23-03-25 13:02:54.030 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 13:02:54.257 : Number of train images: 7,740, iters: 121
23-03-25 13:07:50.847 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 13:07:51.072 : Number of train images: 7,740, iters: 121
23-03-25 13:43:56.615 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 13:43:56.894 : Number of train images: 7,740, iters: 121
23-03-25 13:43:57.621 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
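The reported parameter count can be checked directly against the shapes printed above: every module is a bias-free convolution or transposed convolution, so each contributes in_channels x out_channels x kernel_height x kernel_width weights. A short, framework-free arithmetic check (the helper name is ours, used only to group the repeated ResBlock term):

# Reproduces "Params number: 32638656" from the layer shapes listed above.
def res_convs(ch):
    # 4 ResBlocks per stage, each with two bias-free 3x3 convolutions
    return 4 * 2 * ch * ch * 3 * 3

total = (
    2 * 64 * 3 * 3                              # m_head: Conv2d(2, 64, 3x3)
    + res_convs(64)  + 64 * 128 * 2 * 2         # m_down1 + 2x2 strided conv
    + res_convs(128) + 128 * 256 * 2 * 2        # m_down2 + 2x2 strided conv
    + res_convs(256) + 256 * 512 * 2 * 2        # m_down3 + 2x2 strided conv
    + res_convs(512)                            # m_body
    + 512 * 256 * 2 * 2 + res_convs(256)        # m_up3: transposed conv + ResBlocks
    + 256 * 128 * 2 * 2 + res_convs(128)        # m_up2
    + 128 * 64 * 2 * 2  + res_convs(64)         # m_up1
    + 64 * 1 * 3 * 3                            # m_tail: Conv2d(64, 1, 3x3)
)
print(total)  # 32638656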
23-03-25 13:43:57.641 :
| mean | min | max | std || shape
| 0.001 | -0.086 | 0.072 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| 0.000 | -0.031 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.033 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.040 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| 0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.035 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.035 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| -0.000 | -0.033 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| 0.000 | -0.051 | 0.047 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| -0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| 0.000 | -0.031 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| 0.000 | -0.043 | 0.041 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| 0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| 0.000 | -0.020 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.022 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.019 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.031 | 0.029 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| 0.000 | -0.016 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.015 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.029 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.018 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| -0.000 | -0.021 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.022 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| 0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| -0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.022 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| -0.000 | -0.038 | 0.042 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| 0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| 0.000 | -0.024 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| 0.000 | -0.027 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.029 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| -0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| -0.000 | -0.047 | 0.053 | 0.012 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.036 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| 0.000 | -0.033 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| -0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| -0.000 | -0.041 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.038 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| 0.000 | -0.034 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.032 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.001 | -0.024 | 0.027 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
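The table above lists per-tensor statistics of the freshly initialized weights (init_type: orthogonal, init_gain: 0.2). A table of this kind can be generated by iterating over named_parameters(); the sketch below assumes a torch.nn.Module called 'model' and uses an illustrative print format, not necessarily the one used by KAIR itself.

# Sketch: per-parameter statistics in the style of the table above.
# 'model' stands for the initialized UNetRes; the formatting is illustrative only.
import torch

def describe_parameters(model: torch.nn.Module) -> str:
    rows = [' | {:^6} | {:^6} | {:^6} | {:^6} || shape'.format('mean', 'min', 'max', 'std')]
    for name, param in model.named_parameters():
        w = param.data.float()
        rows.append(' | {:6.3f} | {:6.3f} | {:6.3f} | {:6.3f} | {} || {}'.format(
            w.mean().item(), w.min().item(), w.max().item(), w.std().item(),
            w.shape, name))
    return '\n'.join(rows)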
23-03-25 14:24:58.573 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 14:24:58.810 : Number of train images: 7,740, iters: 121
23-03-25 14:24:59.525 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 14:24:59.545 :
| mean | min | max | std || shape
| -0.000 | -0.083 | 0.100 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| -0.000 | -0.032 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.030 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| 0.000 | -0.031 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.031 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| -0.000 | -0.034 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| -0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| 0.000 | -0.040 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| 0.000 | -0.033 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.050 | 0.049 | 0.012 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| 0.000 | -0.030 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.027 | 0.023 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| -0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| -0.000 | -0.039 | 0.039 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.019 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.018 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.023 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| -0.000 | -0.019 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.030 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| 0.000 | -0.014 | 0.017 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| -0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.016 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| -0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.027 | 0.029 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| -0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| 0.000 | -0.023 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.023 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.019 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| -0.000 | -0.035 | 0.036 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.027 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| -0.000 | -0.027 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| 0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| -0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| -0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| -0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| -0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.051 | 0.051 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| 0.000 | -0.032 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| 0.000 | -0.035 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.030 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| -0.000 | -0.032 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| -0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| 0.000 | -0.031 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| -0.000 | -0.031 | 0.039 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.026 | 0.025 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
23-03-25 14:26:36.736 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 14:26:36.972 : Number of train images: 7,740, iters: 121
23-03-25 14:26:37.699 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 14:26:37.719 :
| mean | min | max | std || shape
| -0.001 | -0.074 | 0.071 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| -0.000 | -0.033 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| -0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| 0.000 | -0.035 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| 0.000 | -0.032 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.033 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.036 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| -0.000 | -0.037 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| 0.000 | -0.048 | 0.048 | 0.012 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| 0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| 0.000 | -0.029 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| -0.000 | -0.038 | 0.042 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| -0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.018 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| -0.000 | -0.027 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.015 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.016 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.016 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.030 | 0.029 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.020 | 0.023 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.018 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| -0.000 | -0.021 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| -0.000 | -0.039 | 0.036 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.028 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| 0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| -0.000 | -0.024 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.027 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| 0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.055 | 0.050 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.040 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.035 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| 0.000 | -0.035 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.033 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.037 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| -0.000 | -0.034 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| -0.000 | -0.037 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| -0.000 | -0.023 | 0.023 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
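Editor's note: the near-zero means and scale-dependent standard deviations in the table above are consistent with the configured orthogonal initialization (init_type: orthogonal, init_gain: 0.2). For an orthogonal weight tensor scaled by a gain of 0.2, the per-entry std is roughly 0.2 divided by the square root of the larger flattened dimension, e.g. 0.2/sqrt(64*3*3) ~ 0.008 for the 64-channel 3x3 convs and 0.2/sqrt(512*3*3) ~ 0.003 for the 512-channel ones, matching the printed values. As an illustration only (not the training code's own routine, which may differ in detail), such an init can be applied like this:

    import torch.nn as nn
    import torch.nn.init as init

    def init_conv_orthogonal(m, gain=0.2):
        # Hedged sketch of the configured init (init_type: orthogonal, init_gain: 0.2);
        # only convolution weights are touched, and biases (if any) are zeroed.
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            init.orthogonal_(m.weight.data, gain=gain)
            if m.bias is not None:
                init.constant_(m.bias.data, 0.0)

    # usage: net.apply(init_conv_orthogonal)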
23-03-25 14:30:32.831 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 14:30:33.069 : Number of train images: 7,740, iters: 121
23-03-25 14:30:33.780 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
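Editor's note: the module tree printed above repeats one pattern per scale: a bias-free residual block (Conv3x3 -> ReLU -> Conv3x3 plus identity) stacked four times, a 2x2 strided convolution for downsampling, and a 2x2 transposed convolution for upsampling. A minimal stand-alone PyTorch sketch of one encoder stage (not the repository's own classes) looks like this:

    import torch.nn as nn

    class ResBlock(nn.Module):
        # Matches the printed blocks: Conv3x3 -> ReLU -> Conv3x3, bias=False, plus identity.
        def __init__(self, ch):
            super().__init__()
            self.res = nn.Sequential(
                nn.Conv2d(ch, ch, 3, 1, 1, bias=False),
                nn.ReLU(inplace=True),
                nn.Conv2d(ch, ch, 3, 1, 1, bias=False),
            )

        def forward(self, x):
            return x + self.res(x)

    # One encoder stage as printed in m_down1: four ResBlocks, then a 2x2 strided conv.
    m_down1 = nn.Sequential(*[ResBlock(64) for _ in range(4)],
                            nn.Conv2d(64, 128, 2, 2, bias=False))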
23-03-25 14:30:33.800 :
| mean | min | max | std | shape || name
| 0.000 | -0.075 | 0.095 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| -0.000 | -0.035 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.035 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.036 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| -0.000 | -0.037 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| -0.000 | -0.031 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.033 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| 0.000 | -0.033 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.048 | 0.048 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.024 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| -0.000 | -0.023 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| 0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| 0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.024 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.024 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| -0.000 | -0.036 | 0.039 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| -0.000 | -0.019 | 0.023 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.019 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.029 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| 0.000 | -0.015 | 0.017 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| -0.000 | -0.016 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.028 | 0.030 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| -0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.040 | 0.036 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| 0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| -0.000 | -0.027 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.027 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| 0.000 | -0.024 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| -0.000 | -0.029 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.025 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| -0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.050 | 0.054 | 0.012 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.039 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.037 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| 0.000 | -0.034 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| 0.000 | -0.033 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.035 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| 0.000 | -0.033 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| 0.000 | -0.032 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.035 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| -0.000 | -0.024 | 0.026 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
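Editor's note: the logged "Params number: 32638656" can be cross-checked directly from the weight shapes printed above; all layers are bias-free, so only the weight tensors contribute. A quick check, with the shapes transcribed from the table:

    from math import prod

    # Weight shapes as printed in the log (bias=False everywhere).
    shapes = (
        [(64, 2, 3, 3)]                                  # m_head
        + [(64, 64, 3, 3)] * 8   + [(128, 64, 2, 2)]     # m_down1: 4 ResBlocks (2 convs each) + strided conv
        + [(128, 128, 3, 3)] * 8 + [(256, 128, 2, 2)]    # m_down2
        + [(256, 256, 3, 3)] * 8 + [(512, 256, 2, 2)]    # m_down3
        + [(512, 512, 3, 3)] * 8                         # m_body: 4 ResBlocks
        + [(512, 256, 2, 2)] + [(256, 256, 3, 3)] * 8    # m_up3: transposed conv + 4 ResBlocks
        + [(256, 128, 2, 2)] + [(128, 128, 3, 3)] * 8    # m_up2
        + [(128, 64, 2, 2)]  + [(64, 64, 3, 3)] * 8      # m_up1
        + [(1, 64, 3, 3)]                                # m_tail
    )

    print(sum(prod(s) for s in shapes))  # 32638656, matching the logged count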
23-03-25 14:32:06.176 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 14:32:06.412 : Number of train images: 7,740, iters: 121
23-03-25 14:32:07.115 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 14:32:07.136 :
| mean | min | max | std | shape || name
| 0.000 | -0.089 | 0.088 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| 0.000 | -0.035 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| -0.000 | -0.032 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| 0.000 | -0.037 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.031 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| 0.000 | -0.041 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| -0.000 | -0.038 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| 0.000 | -0.030 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| -0.000 | -0.033 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| 0.000 | -0.048 | 0.055 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| -0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| -0.000 | -0.028 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| -0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.025 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.026 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| 0.000 | -0.037 | 0.039 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| 0.000 | -0.020 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.019 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.032 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| -0.000 | -0.015 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.030 | 0.031 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.019 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.020 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.019 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.038 | 0.037 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| -0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.027 | 0.030 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| 0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.029 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| -0.000 | -0.025 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| 0.000 | -0.028 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.059 | 0.054 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.033 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.034 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.033 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| 0.000 | -0.033 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.034 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| 0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| -0.000 | -0.033 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.034 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.025 | 0.022 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
23-03-25 14:33:19.667 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 14:33:19.901 : Number of train images: 7,740, iters: 121
23-03-25 14:33:20.609 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 14:33:20.629 :
| mean | min | max | std | shape || name
| -0.000 | -0.076 | 0.073 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| -0.000 | -0.035 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.033 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.032 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| -0.000 | -0.036 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| -0.000 | -0.033 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| -0.000 | -0.034 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.034 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| -0.000 | -0.037 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.061 | 0.049 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.028 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.024 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| -0.000 | -0.027 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| 0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| 0.000 | -0.035 | 0.036 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| -0.000 | -0.019 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.022 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| -0.000 | -0.028 | 0.030 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.016 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| -0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| -0.000 | -0.030 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.022 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| 0.000 | -0.021 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| 0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.038 | 0.037 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| 0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.024 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.057 | 0.049 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| 0.000 | -0.036 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| 0.000 | -0.033 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| 0.000 | -0.033 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| -0.000 | -0.031 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.034 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| 0.000 | -0.035 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.036 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.024 | 0.028 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
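Editor's note: the per-parameter tables the trainer prints at startup (mean / min / max / std per weight tensor) can be regenerated for any checkpoint with standard PyTorch alone. A small helper along these lines (an approximation of the framework's own formatting, not a copy of it) produces the same kind of summary:

    import torch

    def describe_params(net: torch.nn.Module) -> str:
        # One line per weight tensor: mean / min / max / std, then shape and name,
        # mirroring the layout of the tables in this log.
        lines = [' | {:^6s} | {:^6s} | {:^6s} | {:^6s} | shape || name'.format('mean', 'min', 'max', 'std')]
        for name, p in net.named_parameters():
            v = p.data.float()
            lines.append(' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {}'.format(
                v.mean().item(), v.min().item(), v.max().item(), v.std().item(),
                tuple(v.shape), name))
        return '\n'.join(lines)

    # usage: print(describe_params(net))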
23-03-25 14:39:48.437 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../imgs
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 14:39:48.672 : Number of train images: 7,740, iters: 121
23-03-25 14:39:49.377 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 14:39:49.393 :
| mean | min | max | std | shape || name
| -0.001 | -0.078 | 0.091 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| 0.000 | -0.036 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| -0.000 | -0.036 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| 0.000 | -0.031 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| -0.000 | -0.032 | 0.030 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| -0.000 | -0.031 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| -0.000 | -0.034 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.035 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| 0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.054 | 0.050 | 0.012 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| 0.000 | -0.024 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| 0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.024 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| 0.000 | -0.029 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.028 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| 0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| 0.000 | -0.040 | 0.043 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| 0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| -0.000 | -0.021 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| -0.000 | -0.021 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| -0.000 | -0.032 | 0.032 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.017 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.017 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.016 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| -0.000 | -0.027 | 0.030 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| -0.000 | -0.021 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.022 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| -0.000 | -0.038 | 0.038 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| -0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.023 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.028 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| -0.000 | -0.027 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.056 | 0.045 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.036 | 0.039 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.033 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.037 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| 0.000 | -0.035 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.033 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.034 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| -0.000 | -0.036 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.033 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.024 | 0.031 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
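Note on the ResBlock entries in the structure dumps: PyTorch's repr only shows the registered `res` branch (3x3 conv, ReLU, 3x3 conv, all bias-free); the skip connection is not visible in the printout. A minimal sketch, assuming the standard residual formulation x + res(x) (an assumption, since the forward pass itself is not part of this log):

# Minimal sketch of the ResBlock printed in the structure dumps above.
# Assumption: the block returns x + res(x); the repr only shows the `res`
# branch (Conv-ReLU-Conv, 3x3, bias=False), not the skip addition.
import torch
import torch.nn as nn

class ResBlock(nn.Module):
    def __init__(self, channels=64):
        super().__init__()
        self.res = nn.Sequential(
            nn.Conv2d(channels, channels, 3, stride=1, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, stride=1, padding=1, bias=False),
        )

    def forward(self, x):
        return x + self.res(x)

print(ResBlock(64))  # repr matches the "(res): Sequential(...)" blocks above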
23-03-25 15:22:05.510 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../images_reduced
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 15:22:05.732 : Number of train images: 774, iters: 13
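The per-epoch iteration count reported above is consistent with ceil(number of training images / dataloader_batch_size): ceil(774 / 64) = 13 for this run, and ceil(7740 / 64) = 121 for the earlier run on the full image set. A small check, assuming that is indeed the expression the training script uses:

# Reproduces the "iters" values logged next to "Number of train images".
# Assumption: iters per epoch = ceil(num_images / batch_size), batch_size = 64
# as in the options dump; the prints simply match the logged numbers.
import math

def iters_per_epoch(num_images, batch_size=64):
    return int(math.ceil(num_images / batch_size))

print(iters_per_epoch(774))   # 13  -> "Number of train images: 774, iters: 13"
print(iters_per_epoch(7740))  # 121 -> earlier run with 7,740 images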
23-03-25 15:22:06.462 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
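The reported "Params number: 32638656" can be reproduced directly from the structure dump above: every layer is a bias-free convolution or transposed convolution, so the total is just the sum of the weight-tensor sizes, with the shapes read off the printout:

# Cross-check of "Params number: 32638656" against the printed UNetRes layout.
def conv(out_c, in_c, k):            # weight elements of a (transposed) conv
    return out_c * in_c * k * k

def stage(c, blocks=4):              # 4 ResBlocks, each with two 3x3 convs of width c
    return blocks * 2 * conv(c, c, 3)

total = (
    conv(64, 2, 3)                                   # m_head
    + stage(64) + conv(128, 64, 2)                   # m_down1 (+ strided conv)
    + stage(128) + conv(256, 128, 2)                 # m_down2
    + stage(256) + conv(512, 256, 2)                 # m_down3
    + stage(512)                                     # m_body
    + conv(512, 256, 2) + stage(256)                 # m_up3 (transposed conv + blocks)
    + conv(256, 128, 2) + stage(128)                 # m_up2
    + conv(128, 64, 2) + stage(64)                   # m_up1
    + conv(1, 64, 3)                                 # m_tail
)
print(total)  # 32638656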
23-03-25 15:22:06.479 :
| mean | min | max | std | shape || parameter name
| 0.001 | -0.080 | 0.078 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| 0.000 | -0.035 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| -0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.035 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.034 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| -0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| -0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| 0.000 | -0.036 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| 0.000 | -0.036 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| 0.000 | -0.051 | 0.045 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.025 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| -0.000 | -0.028 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.026 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.028 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.028 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| -0.000 | -0.035 | 0.040 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.022 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| 0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.018 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.033 | 0.031 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| 0.000 | -0.016 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| 0.000 | -0.016 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| 0.000 | -0.016 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| 0.000 | -0.017 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.016 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| -0.000 | -0.032 | 0.028 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.022 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.019 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.041 | 0.037 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.024 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| 0.000 | -0.025 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.025 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| 0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| -0.000 | -0.059 | 0.046 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| 0.000 | -0.037 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| 0.000 | -0.034 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| 0.000 | -0.035 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| 0.000 | -0.033 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| -0.000 | -0.034 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.033 | 0.041 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| -0.000 | -0.035 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| -0.000 | -0.033 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.027 | 0.027 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
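The std column in these parameter tables is consistent with the configured initialization (init_type: orthogonal, init_gain: 0.2): for a weight flattened to an out_channels x (in_channels*k*k) matrix, the entries of an orthogonal matrix have standard deviation of roughly 1/sqrt(max(rows, cols)), scaled by the gain. This is a consistency check on the logged numbers, not a reproduction of the training code:

# Expected std of orthogonally-initialized weights, gain = 0.2 (from the options).
import math

def expected_std(shape, gain=0.2):
    rows, cols = shape[0], shape[1] * shape[2] * shape[3]
    return gain / math.sqrt(max(rows, cols))

for shape, name in [((64, 2, 3, 3), "m_head.weight"),
                    ((64, 64, 3, 3), "m_down1.0.res.0.weight"),
                    ((512, 512, 3, 3), "m_body.0.res.0.weight"),
                    ((128, 64, 2, 2), "m_down1.4.weight")]:
    print(f"{name}: expected std ~ {expected_std(shape):.3f}")
# -> 0.025, 0.008, 0.003, 0.013; compare with the std column above
#    (the logged 2x2 downsample convs fluctuate between 0.012 and 0.013)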
23-03-25 16:26:57.835 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../images_reduced
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 16:26:58.057 : Number of train images: 774, iters: 13
23-03-25 16:26:58.762 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 16:26:58.780 :
| mean | min | max | std | shape || parameter name
| 0.000 | -0.073 | 0.088 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| 0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.041 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.038 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| -0.000 | -0.038 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| 0.000 | -0.036 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.034 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| 0.000 | -0.033 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| 0.000 | -0.036 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| 0.000 | -0.058 | 0.050 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| 0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| 0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| -0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| 0.000 | -0.024 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| -0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| 0.000 | -0.038 | 0.043 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| -0.000 | -0.019 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| -0.000 | -0.030 | 0.027 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| -0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.016 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| -0.000 | -0.030 | 0.027 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.018 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| -0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.019 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.037 | 0.040 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| 0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| -0.000 | -0.026 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.027 | 0.023 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.026 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| -0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| -0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.055 | 0.053 | 0.012 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.038 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.034 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| 0.000 | -0.032 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| -0.000 | -0.032 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.036 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.038 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| -0.000 | -0.035 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| 0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| -0.000 | -0.029 | 0.024 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
23-03-25 16:31:49.065 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../images_reduced
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 16:31:49.289 : Number of train images: 774, iters: 13
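From the options dump above, the generator is optimized with Adam at lr 1e-4 (betas 0.9/0.999, no weight decay) and the rate is halved by MultiStepLR at 100k, 200k, 300k and 400k steps (gamma 0.5). A minimal sketch of that schedule; the single placeholder parameter stands in for netG.parameters() and is not part of the original setup:

# Sketch of the G learning-rate schedule; only lr, betas, weight decay,
# milestones and gamma are taken from the options dump above.
import torch

w = torch.nn.Parameter(torch.zeros(1))   # placeholder for netG.parameters()
optimizer = torch.optim.Adam([w], lr=1e-4, betas=(0.9, 0.999), weight_decay=0)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[100000, 200000, 300000, 400000], gamma=0.5)

for step in range(1, 400001):
    optimizer.step()       # no-op here (no gradients); keeps the usual call order
    scheduler.step()
    if step in scheduler.milestones:
        print(step, scheduler.get_last_lr())
# lr halves at each milestone: 5e-05, 2.5e-05, 1.25e-05, 6.25e-06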
23-03-25 16:31:49.990 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
23-03-25 16:31:50.008 :
| mean | min | max | std | shape || parameter name
| -0.001 | -0.079 | 0.085 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| -0.000 | -0.037 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.033 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.036 | 0.040 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| 0.000 | -0.036 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| 0.000 | -0.040 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.050 | 0.057 | 0.012 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.029 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| -0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.031 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| 0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| -0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| 0.000 | -0.025 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| 0.000 | -0.035 | 0.038 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| -0.000 | -0.021 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.029 | 0.029 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| 0.000 | -0.016 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| -0.000 | -0.013 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.031 | 0.030 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.018 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| -0.000 | -0.018 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| 0.000 | -0.019 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| -0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| -0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.037 | 0.040 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| -0.000 | -0.026 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| -0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| -0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| -0.000 | -0.029 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| -0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| 0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| -0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| -0.000 | -0.052 | 0.053 | 0.013 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| 0.000 | -0.042 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.035 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.037 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| -0.000 | -0.031 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.034 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| 0.000 | -0.032 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| -0.000 | -0.036 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.025 | 0.025 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
23-03-25 16:40:08.637 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../images_reduced
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 5000
checkpoint_save: 5000
checkpoint_print: 200
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 16:40:08.859 : Number of train images: 774, iters: 13
23-03-25 16:40:09.576 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
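Note: every ResBlock printed in the dump above is the same bias-free Conv-ReLU-Conv unit wrapped in an identity skip. A minimal PyTorch sketch of one such block, assuming the usual residual addition x + res(x) (illustrative only, not copied from KAIR's basicblock.py):

import torch
import torch.nn as nn

class ResBlock(nn.Module):
    # Identity-skip block matching the printed structure:
    # Conv3x3 -> ReLU(inplace) -> Conv3x3, all without bias, added back onto the input.
    def __init__(self, channels=64):
        super().__init__()
        self.res = nn.Sequential(
            nn.Conv2d(channels, channels, 3, stride=1, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, stride=1, padding=1, bias=False),
        )

    def forward(self, x):
        return x + self.res(x)  # residual connection

# Shape-preserving, e.g. ResBlock(64)(torch.randn(1, 64, 32, 32)) -> (1, 64, 32, 32)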
23-03-25 16:40:09.597 :
| mean | min | max | std | shape || parameter name
| -0.000 | -0.076 | 0.087 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| 0.000 | -0.032 | 0.030 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| 0.000 | -0.032 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| -0.000 | -0.033 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| -0.000 | -0.033 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| -0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.037 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| 0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| -0.000 | -0.035 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.055 | 0.055 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| 0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| -0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| 0.000 | -0.024 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.025 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| 0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| -0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| -0.000 | -0.037 | 0.038 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| -0.000 | -0.021 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.020 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| 0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| 0.000 | -0.022 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.032 | 0.029 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.014 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| -0.000 | -0.016 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| 0.000 | -0.014 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| 0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| -0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| 0.000 | -0.030 | 0.029 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.020 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| -0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| -0.000 | -0.021 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| -0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| 0.000 | -0.020 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| -0.000 | -0.036 | 0.035 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| -0.000 | -0.027 | 0.032 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| 0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| 0.000 | -0.026 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| 0.000 | -0.029 | 0.029 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| -0.000 | -0.028 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| 0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| -0.000 | -0.025 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| 0.000 | -0.030 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| -0.000 | -0.056 | 0.055 | 0.012 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| 0.000 | -0.034 | 0.039 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| -0.000 | -0.034 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.036 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| -0.000 | -0.034 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| -0.000 | -0.032 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| 0.000 | -0.039 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| -0.000 | -0.032 | 0.031 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.028 | 0.026 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
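A table like the one above can be regenerated from any nn.Module; a minimal sketch of gathering the same per-tensor statistics (the formatting is approximate, not KAIR's exact describe code):

import torch

def print_param_stats(model: torch.nn.Module) -> None:
    # One row per learnable tensor: mean, min, max, std, shape, name,
    # mirroring the columns of the table above.
    print(' | mean | min | max | std | shape || parameter name')
    for name, p in model.named_parameters():
        v = p.detach().float()
        print(' | {:6.3f} | {:6.3f} | {:6.3f} | {:6.3f} | {} || {}'.format(
            v.mean().item(), v.min().item(), v.max().item(), v.std().item(),
            tuple(v.shape), name))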
23-03-25 17:01:32.657 : task: drunet
model: plain
gpu_ids: [0]
scale: 1
n_channels: 2
n_channels_datasetload: 3
sigma: [0, 50]
sigma_test: 25
path:[
root: denoising
pretrained_netG: None
task: denoising/drunet
log: denoising/drunet
options: denoising/drunet/options
models: denoising/drunet/models
images: denoising/drunet/images
pretrained_optimizerG: None
]
datasets:[
train:[
name: train_dataset
dataset_type: ffdnet
dataroot_H: ../../images_reduced
dataroot_L: None
H_size: 128
dataloader_shuffle: True
dataloader_num_workers: 8
dataloader_batch_size: 64
phase: train
scale: 1
n_channels: 2
]
test:[
name: test_dataset
dataset_type: ffdnet
dataroot_H: testsets/set5
dataroot_L: None
phase: test
scale: 1
n_channels: 2
]
]
netG:[
net_type: drunet
in_nc: 2
out_nc: 1
nc: [64, 128, 256, 512]
nb: 4
gc: 32
ng: 2
reduction: 16
act_mode: R
upsample_mode: convtranspose
downsample_mode: strideconv
bias: False
init_type: orthogonal
init_bn_type: uniform
init_gain: 0.2
scale: 1
]
train:[
epochs: 15
G_lossfn_type: l1
G_lossfn_weight: 1.0
G_tvloss_weight: 1.0
G_optimizer_type: adam
G_optimizer_lr: 0.0001
G_optimizer_clipgrad: None
G_scheduler_type: MultiStepLR
G_scheduler_milestones: [100000, 200000, 300000, 400000]
G_scheduler_gamma: 0.5
G_regularizer_orthstep: None
G_regularizer_clipstep: None
checkpoint_test: 800
checkpoint_save: 800
checkpoint_print: 20
F_feature_layer: 34
F_weights: 1.0
F_lossfn_type: l1
F_use_input_norm: True
F_use_range_norm: False
G_optimizer_betas: [0.9, 0.999]
G_scheduler_restart_weights: 1
G_optimizer_wd: 0
G_optimizer_reuse: False
G_param_strict: True
E_param_strict: True
E_decay: 0
]
opt_path: options/train_drunet.json
is_train: True
merge_bn: False
merge_bn_startpoint: -1
find_unused_parameters: True
use_static_graph: False
dist: False
num_gpu: 1
rank: 0
world_size: 1
23-03-25 17:01:32.878 : Number of train images: 774, iters: 13
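The 'iters' value is the number of mini-batches per epoch, i.e. the image count divided by the configured batch size of 64 and rounded up; a quick check against this run:

import math

batch_size = 64   # dataloader_batch_size from the options above
n_images = 774
print(math.ceil(n_images / batch_size))   # 13, matching 'iters: 13' above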
23-03-25 17:01:33.581 :
Networks name: UNetRes
Params number: 32638656
Net structure:
UNetRes(
(m_head): Conv2d(2, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(m_down1): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down2): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_down3): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): Conv2d(256, 512, kernel_size=(2, 2), stride=(2, 2), bias=False)
)
(m_body): Sequential(
(0): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up3): Sequential(
(0): ConvTranspose2d(512, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up2): Sequential(
(0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_up1): Sequential(
(0): ConvTranspose2d(128, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(2): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(3): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
(4): ResBlock(
(res): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
)
)
(m_tail): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
)
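The 'Params number: 32638656' line can be reproduced from the printed structure alone, since every layer is a bias-free (transposed) convolution; a sketch of that tally for nc = [64, 128, 256, 512] with four ResBlocks per stage:

# Hand tally of the weights in the structure above (all convs bias-free).
nc, nb, in_nc, out_nc = [64, 128, 256, 512], 4, 2, 1

def resblocks(ch):
    # nb ResBlocks per stage, each holding two ch->ch 3x3 convolutions.
    return nb * 2 * ch * ch * 3 * 3

total = in_nc * nc[0] * 3 * 3                    # m_head
for lo, hi in zip(nc[:-1], nc[1:]):              # m_down1..m_down3
    total += resblocks(lo) + lo * hi * 2 * 2     # ResBlocks + 2x2 stride-2 conv
total += resblocks(nc[-1])                       # m_body
for hi, lo in zip(nc[:0:-1], nc[-2::-1]):        # m_up3..m_up1
    total += hi * lo * 2 * 2 + resblocks(lo)     # 2x2 transposed conv + ResBlocks
total += nc[0] * out_nc * 3 * 3                  # m_tail
print(total)                                     # 32638656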
23-03-25 17:01:33.600 :
| mean | min | max | std | shape || parameter name
| -0.001 | -0.080 | 0.075 | 0.025 | torch.Size([64, 2, 3, 3]) || m_head.weight
| -0.000 | -0.033 | 0.036 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.0.weight
| -0.000 | -0.034 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.0.res.2.weight
| 0.000 | -0.032 | 0.037 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.0.weight
| 0.000 | -0.034 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.1.res.2.weight
| 0.000 | -0.033 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.0.weight
| 0.000 | -0.030 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.2.res.2.weight
| -0.000 | -0.032 | 0.040 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.0.weight
| -0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_down1.3.res.2.weight
| -0.000 | -0.054 | 0.053 | 0.013 | torch.Size([128, 64, 2, 2]) || m_down1.4.weight
| -0.000 | -0.027 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.0.weight
| 0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.0.res.2.weight
| -0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.0.weight
| 0.000 | -0.025 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.1.res.2.weight
| -0.000 | -0.026 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.0.weight
| -0.000 | -0.029 | 0.027 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.2.res.2.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.0.weight
| -0.000 | -0.027 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_down2.3.res.2.weight
| -0.000 | -0.039 | 0.044 | 0.009 | torch.Size([256, 128, 2, 2]) || m_down2.4.weight
| 0.000 | -0.022 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.0.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.0.res.2.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.0.weight
| 0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.1.res.2.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.0.weight
| -0.000 | -0.018 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.2.res.2.weight
| -0.000 | -0.022 | 0.021 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.0.weight
| 0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_down3.3.res.2.weight
| 0.000 | -0.030 | 0.027 | 0.006 | torch.Size([512, 256, 2, 2]) || m_down3.4.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.0.weight
| 0.000 | -0.014 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.0.res.2.weight
| -0.000 | -0.017 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.0.weight
| -0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.1.res.2.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.0.weight
| -0.000 | -0.015 | 0.015 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.2.res.2.weight
| -0.000 | -0.014 | 0.016 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.0.weight
| 0.000 | -0.015 | 0.014 | 0.003 | torch.Size([512, 512, 3, 3]) || m_body.3.res.2.weight
| -0.000 | -0.030 | 0.030 | 0.006 | torch.Size([512, 256, 2, 2]) || m_up3.0.weight
| 0.000 | -0.019 | 0.018 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.0.weight
| 0.000 | -0.018 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.1.res.2.weight
| 0.000 | -0.021 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.0.weight
| 0.000 | -0.019 | 0.022 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.2.res.2.weight
| 0.000 | -0.020 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.0.weight
| -0.000 | -0.019 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.3.res.2.weight
| 0.000 | -0.021 | 0.019 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.0.weight
| -0.000 | -0.019 | 0.020 | 0.004 | torch.Size([256, 256, 3, 3]) || m_up3.4.res.2.weight
| 0.000 | -0.038 | 0.038 | 0.009 | torch.Size([256, 128, 2, 2]) || m_up2.0.weight
| 0.000 | -0.025 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.0.weight
| -0.000 | -0.025 | 0.024 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.1.res.2.weight
| 0.000 | -0.028 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.0.weight
| -0.000 | -0.030 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.2.res.2.weight
| 0.000 | -0.027 | 0.026 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.0.weight
| -0.000 | -0.028 | 0.028 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.3.res.2.weight
| -0.000 | -0.028 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.0.weight
| 0.000 | -0.026 | 0.025 | 0.006 | torch.Size([128, 128, 3, 3]) || m_up2.4.res.2.weight
| 0.000 | -0.053 | 0.053 | 0.012 | torch.Size([128, 64, 2, 2]) || m_up1.0.weight
| -0.000 | -0.032 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.0.weight
| 0.000 | -0.033 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.1.res.2.weight
| -0.000 | -0.035 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.0.weight
| -0.000 | -0.032 | 0.038 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.2.res.2.weight
| 0.000 | -0.032 | 0.035 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.0.weight
| 0.000 | -0.032 | 0.034 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.3.res.2.weight
| 0.000 | -0.037 | 0.033 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.0.weight
| -0.000 | -0.038 | 0.032 | 0.008 | torch.Size([64, 64, 3, 3]) || m_up1.4.res.2.weight
| 0.000 | -0.026 | 0.026 | 0.008 | torch.Size([1, 64, 3, 3]) || m_tail.weight
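The near-zero means and per-layer standard deviations above are consistent with the configured orthogonal initialisation with init_gain 0.2: for a weight flattened to an (out_channels x fan_in) matrix, a semi-orthogonal matrix has entries with standard deviation of roughly 1/sqrt(max(out_channels, fan_in)), scaled by the gain. A rough check (an interpretation of the table, not something the log states):

# Expected std under orthogonal init with gain 0.2, ignoring finite-sample noise.
gain = 0.2
for shape in [(64, 2, 3, 3), (64, 64, 3, 3), (128, 128, 3, 3),
              (256, 256, 3, 3), (512, 512, 3, 3), (128, 64, 2, 2)]:
    out_ch, fan_in = shape[0], shape[1] * shape[2] * shape[3]
    print(shape, gain / max(out_ch, fan_in) ** 0.5)
# ~0.025, ~0.0083, ~0.0059, ~0.0042, ~0.0029, ~0.0125 -- in line with the std column above.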
23-03-25 17:50:54.848 : <epoch: 1, iter: 20, lr:1.000e-04> G_loss: 6.285e-01
23-03-25 18:54:42.648 : <epoch: 3, iter: 40, lr:1.000e-04> G_loss: 6.208e-01
23-03-25 19:40:43.718 : <epoch: 4, iter: 60, lr:1.000e-04> G_loss: 4.357e-01
23-03-25 20:30:08.600 : <epoch: 6, iter: 80, lr:1.000e-04> G_loss: 4.554e-01
23-03-25 21:34:07.332 : <epoch: 8, iter: 100, lr:1.000e-04> G_loss: 4.449e-01
23-03-25 22:20:11.169 : <epoch: 9, iter: 120, lr:1.000e-04> G_loss: 5.174e-01
23-03-25 23:09:54.077 : <epoch: 11, iter: 140, lr:1.000e-04> G_loss: 4.483e-01
23-03-26 00:13:39.315 : <epoch: 13, iter: 160, lr:1.000e-04> G_loss: 4.610e-01
23-03-26 00:59:39.362 : <epoch: 14, iter: 180, lr:1.000e-04> G_loss: 4.139e-01
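The training entries above share the fixed pattern '<epoch: E, iter: I, lr:LR> G_loss: L', so the loss curve can be pulled straight out of this file; a minimal parsing sketch (the file name and regex are assumptions, not part of KAIR):

import re

pattern = re.compile(
    r'<epoch:\s*(\d+), iter:\s*([\d,]+), lr:\s*([\d.eE+-]+)> G_loss: ([\d.eE+-]+)')

iters, losses = [], []
with open('train.log') as f:             # path to this log file (assumed)
    for line in f:
        m = pattern.search(line)
        if m:
            iters.append(int(m.group(2).replace(',', '')))
            losses.append(float(m.group(4)))

print(list(zip(iters, losses))[:3])      # e.g. [(20, 0.6285), (40, 0.6208), (60, 0.4357)]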