deep-tempest/end-to-end/options/train_drunet_finetuning.json

{
  "task": "drunet"                // root/task/images-models-options
  , "model": "plain"              // "plain"
  , "gpu_ids": [0]
  , "scale": 0                    // broadcast to "netG" if SISR
  , "n_channels": 1               // broadcast to "datasets"; 1 for grayscale, 3 for color
  , "n_channels_datasetload": 3   // broadcast to the image training set
  , "use_abs_value": false        // use the absolute value of the capture (true) or its real and imaginary parts (false)
  , "path": {
    "root": "finetuning"                  // "finetuning_{border, middle}"
    , "pretrained_netG": "path/to/model"  // path to the pretrained model; set to null to train from scratch
  }
, "datasets": {
"train": {
"name": "train_dataset" // just name
, "dataset_type": "drunet_finetune" // "dncnn" | "dnpatch" for dncnn, | "fdncnn" | "ffdnet" | "sr" | "srmd" | "dpsr" | "plain" | "plainpatch"
, "dataroot_H": "path/to/train_original" // path of H training dataset
, "dataroot_L": null // path of L training dataset, not used for finetuning
, "sigma": [0, 0] // 15, 25, 50 for DnCNN | [0, 75] for FFDNet and FDnCNN
, "use_all_patches": true // use or not all image patches
, "skip_natural_patches": false// keep only non-natural image patches/text based image patches
, "num_patches_per_image": 20 // number of random patches of training image, if not using all patches
, "H_size": 256 // patch size 40 | 64 | 96 | 128 | 192
, "dataloader_shuffle": true
, "dataloader_num_workers": 8
, "dataloader_batch_size": 32 // batch size 1 | 16 | 32 | 48 | 64 | 128
, "dataset_percentage": 100 // percentage of the whole dataset to train
}
, "test": {
"name": "test_dataset" // just name
, "dataset_type": "drunet_finetune" // "drunet" | "drunet_finetune" | "dncnn" | "dnpatch" for dncnn, | "fdncnn" | "ffdnet" | "sr" | "srmd" | "dpsr" | "plain" | "plainpatch"
, "dataroot_H": "path/to/val_original" // path of H testing dataset
, "dataroot_L": null // path of L testing dataset, not used for finetuning
, "sigma_test": 0 // 15, 25, 50 for DnCNN and ffdnet
}
}
, "netG": {
"net_type": "drunet" // "dncnn" | "fdncnn" | "ffdnet" | "srmd" | "dpsr" | "srresnet0" | "srresnet1" | "rrdbnet"
, "in_nc": 2 // input channel number
, "out_nc": 1 // ouput channel number
, "nc": [64, 128, 256, 512] // 64 for "dncnn"
, "nb": 4 // 17 for "dncnn", 20 for dncnn3, 16 for "srresnet"
, "gc": 32 // unused
, "ng": 2 // unused
, "reduction": 16 // unused
, "act_mode": "R" // "BR" for BN+ReLU | "R" for ReLU
, "upsample_mode": "convtranspose" // "pixelshuffle" | "convtranspose" | "upconv"
, "downsample_mode": "strideconv" // "strideconv" | "avgpool" | "maxpool"
, "bias": false //
, "init_type": "kaiming_normal" // "orthogonal" | "normal" | "uniform" | "xavier_normal" | "xavier_uniform" | "kaiming_normal" | "kaiming_uniform"
, "init_bn_type": "uniform" // "uniform" | "constant"
, "init_gain": 0.2
, "frozen_layers": [] // ["m_down3", "m_body", "m_up3"] for border layers finetuning
// ["m_head", "m_down1", "m_down2", "m_up2", "m_up1", "m_tail"] for middle layers
// ["m_head", "m_down1", "m_body", "m_up1", "m_tail"] for halfway layers
}
, "train": {
"epochs": 50 // number of epochs to train
, "G_lossfn_type": "tv" // "l1" preferred | tv | "l2sum" | "l2" | "ssim"
, "G_lossfn_weight": 1.0 // default
, "G_tvloss_weight": 2.225479128512864e-13 // total variation weight
, "G_tvloss_reduction": "mean" // "sum" | "mean": Reduction for TV loss
, "G_optimizer_type": "adam" // fixed, adam is enough
, "G_optimizer_lr": 4.630645185e-05 // learning rate
, "G_optimizer_clipgrad": null // unused
, "G_scheduler_type": "MultiStepLR" // "MultiStepLR" is enough
, "G_scheduler_milestones": [] // as batch iters
, "G_scheduler_iter_step": 90180000 // use gamma factor on lr every this number of iter steps, null if using milestones
, "G_scheduler_gamma": 0.5 //
, "G_regularizer_orthstep": null // unused
, "G_regularizer_clipstep": null // unused
// epoch checkpoints
, "checkpoint_test": 1 // for testing print
, "checkpoint_test_save": 5 // for testing image saving
, "checkpoint_save": 10 // for saving model
, "checkpoint_print": 1 // for loss print
}
}
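
This file is JSON with "//" line comments, so it cannot be fed to a strict JSON parser directly. A minimal loading sketch, assuming the training script strips the comments before parsing; the helper name below is illustrative, not the repo's API:

import json
import re

def load_commented_json(path):
    # Illustrative helper: drop everything after "//" on each line, then parse.
    # Safe only because no string value in this config itself contains "//".
    with open(path, "r", encoding="utf-8") as f:
        text = "".join(re.sub(r"//.*", "", line) for line in f)
    return json.loads(text)

opt = load_commented_json("options/train_drunet_finetuning.json")
print(opt["train"]["epochs"], opt["netG"]["frozen_layers"])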
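
The config selects a total-variation loss ("G_lossfn_type": "tv") with a small weight and a configurable reduction. The repo's exact formulation may differ; the standard anisotropic TV form, for illustration only:

import torch

def tv_loss(x, reduction="mean"):
    # Anisotropic total variation of a batch x with shape (N, C, H, W):
    # absolute differences between neighboring pixels, summed or averaged.
    dh = (x[:, :, 1:, :] - x[:, :, :-1, :]).abs()
    dw = (x[:, :, :, 1:] - x[:, :, :, :-1]).abs()
    if reduction == "mean":
        return dh.mean() + dw.mean()
    return dh.sum() + dw.sum()

# loss = opt["train"]["G_tvloss_weight"] * tv_loss(output, opt["train"]["G_tvloss_reduction"])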
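
"frozen_layers" lists DRUNet sub-modules to exclude from finetuning (m_head, m_down1-m_down3, m_body, m_up3-m_up1, m_tail, per the comments above). How the training code applies it is defined in the repo; a plausible PyTorch pattern is:

def freeze_layers(netG, frozen_layers):
    # Disable gradients for every parameter whose name starts with one of the
    # listed sub-module names, e.g. "m_body.0.res.0.weight" matches "m_body".
    for name, param in netG.named_parameters():
        if any(name.startswith(prefix) for prefix in frozen_layers):
            param.requires_grad = False

# freeze_layers(netG, opt["netG"]["frozen_layers"])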