deep-tempest/end-to-end/options/train_drunet.json

{
"task": "drunet" // root/task/images-models-options
, "model": "plain" // "plain"
, "gpu_ids": [0]
, "scale": 0 // broadcast to "netG" if SISR
, "n_channels": 1 // broadcast to "datasets", 1 for grayscale, 3 for color
, "n_channels_datasetload": 3 // broadcast to image training set
, "use_abs_value": true // use absolute value of capture (true) or use real and complex values (false)
, "path": {
"root": "drunet_training" // "denoising" | "superresolution"
, "pretrained_netG": "path/to/model" // path of pretrained model, if model from scratch type: null
}
, "datasets": {
"train": {
"name": "train_dataset" // just name
, "dataset_type": "ffdnet" // "dncnn" | "dnpatch" for dncnn, | "fdncnn" | "ffdnet" | "sr" | "srmd" | "dpsr" | "plain" | "plainpatch"
, "dataroot_H": "path/to/train_original"// path of H training dataset
, "dataroot_L": "path/to/train_degraded" // path of L training dataset, if using noisy H type: null
, "sigma": [0, 20] // 15, 25, 50 for DnCNN | [0, 75] for FFDNet and FDnCNN
, "use_all_patches": true // use or not all image patches
, "skip_natural_patches": false// keep only non-natural image patches/text based image patches
, "num_patches_per_image": 21 // number of random patches of training image, if not using all patches
, "H_size": 256 // patch size 40 | 64 | 96 | 128 | 192
, "dataloader_shuffle": true
, "dataloader_num_workers": 8
, "dataloader_batch_size": 32 // batch size 1 | 16 | 32 | 48 | 64 | 128
}
, "test": {
"name": "test_dataset" // just name
, "dataset_type": "ffdnet" // "dncnn" | "dnpatch" for dncnn, | "fdncnn" | "ffdnet" | "sr" | "srmd" | "dpsr" | "plain" | "plainpatch"
, "dataroot_H": "path/to/val_original" // path of H testing dataset
, "dataroot_L": "path/to/val_degraded" // path of L testing dataset
, "sigma_test": 15 // 15, 25, 50 for DnCNN and ffdnet
}
}
, "netG": {
"net_type": "drunet" // "dncnn" | "fdncnn" | "ffdnet" | "srmd" | "dpsr" | "srresnet0" | "srresnet1" | "rrdbnet"
, "in_nc": 2 // input channel number
, "out_nc": 1 // ouput channel number
, "nc": [64, 128, 256, 512] // 64 for "dncnn"
, "nb": 4 // 17 for "dncnn", 20 for dncnn3, 16 for "srresnet"
, "gc": 32 // unused
, "ng": 2 // unused
, "reduction": 16 // unused
, "act_mode": "R" // "BR" for BN+ReLU | "R" for ReLU
, "upsample_mode": "convtranspose" // "pixelshuffle" | "convtranspose" | "upconv"
, "downsample_mode": "strideconv" // "strideconv" | "avgpool" | "maxpool"
, "bias": false//
, "init_type": "kaiming_normal" // "orthogonal" | "normal" | "uniform" | "xavier_normal" | "xavier_uniform" | "kaiming_normal" | "kaiming_uniform"
, "init_bn_type": "uniform" // "uniform" | "constant"
, "init_gain": 0.2
}
, "train": {
"epochs": 10 // number of epochs to train
, "G_lossfn_type": "tv" // "l1" preferred | tv | "l2sum" | "l2" | "ssim"
, "G_lossfn_weight": 1.0 // default
, "G_tvloss_weight": 2.225479128512864e-13 // total variation weight
, "G_tvloss_reduction": "mean" // "sum" | "mean": Reduction for TV loss
, "G_optimizer_type": "adam" // fixed, adam is enough
, "G_optimizer_lr": 1.5435483950260915e-05 // learning rate
, "G_optimizer_clipgrad": null // unused
, "G_scheduler_type": "MultiStepLR" // "MultiStepLR" is enough
, "G_scheduler_milestones": [] // as batch iters
, "G_scheduler_iter_step": 90180000 // use gamma factor on lr every this number of iter steps, null if using milestones
, "G_scheduler_gamma": 0.5 //
, "G_regularizer_orthstep": null // unused
, "G_regularizer_clipstep": null // unused
// epoch checkpoints
, "checkpoint_test": 1 // for testing print
, "checkpoint_test_save": 5 // for testing image saving
, "checkpoint_save": 10 // for saving model
, "checkpoint_print": 1 // for loss print
}
}
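
Note: the file above uses "//" comments, so it is not strict JSON; the training code presumably strips these comments before parsing, as KAIR-style option loaders do. Below is a minimal sketch of such a loader; the function name parse_options and the relative path are illustrative, not taken from the repository.

import json
from collections import OrderedDict

def parse_options(opt_path):
    """Read a '//'-commented options JSON (illustrative helper)."""
    json_str = ''
    with open(opt_path, 'r') as f:
        for line in f:
            # drop everything after '//' so the remainder is valid JSON
            json_str += line.split('//')[0] + '\n'
    return json.loads(json_str, object_pairs_hook=OrderedDict)

# usage (illustrative path)
opt = parse_options('options/train_drunet.json')
print(opt['netG']['net_type'])  # -> drunet

Splitting each line on '//' works here because none of the string values in this file contain that sequence; a path such as "https://..." would need a more careful parser.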