deep-tempest/end-to-end/options/test_drunet.json

{
"task": "drunet" // root/task/images-models-options
, "model": "plain" // "plain"
, "gpu_ids": [0]
, "scale": 0 // broadcast to "netG" if SISR
, "n_channels": 1 // broadcast to "datasets", 1 for grayscale, 3 for color
, "n_channels_datasetload": 3 // broadcast to image training set
, "path": {
"root": "drunet_inference" // "denoising" | "superresolution"
, "pretrained_netG": "path/to/model" // path of pretrained model, if model from scratch type: null
}
, "datasets": {
"train": {
"name": "train_dataset" // just name
, "dataset_type": "drunet" // "dncnn" | "dnpatch" for dncnn, | "fdncnn" | "ffdnet" | "sr" | "srmd" | "dpsr" | "plain" | "plainpatch"
, "dataroot_H": "web_subset/ground_truth" // path of H training dataset
, "sigma": [0, 0] // 15, 25, 50 for DnCNN | [0, 75] for FFDNet and FDnCNN
, "num_patches_per_image": 20 // number of random patches of training image
, "dataroot_L": "web_subset/capturas" // path of L training dataset, if using noisy H type: null
, "H_size": 128 // patch size 40 | 64 | 96 | 128 | 192
, "dataloader_shuffle": false
, "dataloader_num_workers": 8
, "dataloader_batch_size": 64 // batch size 1 | 16 | 32 | 48 | 64 | 128
}
, "test": {
"name": "test_dataset" // just name
, "dataset_type": "ffdnet" // "dncnn" | "dnpatch" for dncnn, | "fdncnn" | "ffdnet" | "sr" | "srmd" | "dpsr" | "plain" | "plainpatch"
, "dataroot_H": "web_subset/capturas_splitter" // path of H testing dataset
, "dataroot_L": "web_subset/capturas_splitter" // path of L testing dataset
, "sigma_test": 0 // 15, 25, 50 for DnCNN and ffdnet
}
}
, "netG": {
"net_type": "drunet" // "dncnn" | "fdncnn" | "ffdnet" | "srmd" | "dpsr" | "srresnet0" | "srresnet1" | "rrdbnet"
, "in_nc": 2 // input channel number
, "out_nc": 1 // ouput channel number
, "nc": [64, 128, 256, 512] // 64 for "dncnn"
, "nb": 4 // 17 for "dncnn", 20 for dncnn3, 16 for "srresnet"
, "gc": 32 // unused
, "ng": 2 // unused
, "reduction": 16 // unused
, "act_mode": "R" // "BR" for BN+ReLU | "R" for ReLU
, "upsample_mode": "convtranspose" // "pixelshuffle" | "convtranspose" | "upconv"
, "downsample_mode": "strideconv" // "strideconv" | "avgpool" | "maxpool"
, "bias": false//
, "init_type": "orthogonal" // "orthogonal" | "normal" | "uniform" | "xavier_normal" | "xavier_uniform" | "kaiming_normal" | "kaiming_uniform"
, "init_bn_type": "uniform" // "uniform" | "constant"
, "init_gain": 0.2
}
, "train": {
"epochs": 1000 // number of epochs to train
, "G_lossfn_type": "l2" // "l1" preferred | "l2sum" | "l2" | "ssim"
, "G_lossfn_weight": 1.0 // default
, "G_tvloss_weight": 0.1 // total variation weight
, "G_optimizer_type": "adam" // fixed, adam is enough
, "G_optimizer_lr": 1e-4 // learning rate
, "G_optimizer_clipgrad": null // unused
, "G_scheduler_type": "MultiStepLR" // "MultiStepLR" is enough
, "G_scheduler_milestones": [1600, 3200, 4800, 6400, 8000, 9600, 11200, 12800, 14400]
, "G_scheduler_gamma": 0.1 //
, "G_regularizer_orthstep": null // unused
, "G_regularizer_clipstep": null // unused
// iteration (batch step) checkpoints
, "checkpoint_test": 1600 // for testing
, "checkpoint_save": 3999 // for saving model
, "checkpoint_print": 16 // for print
}
}
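
For reference, a minimal sketch of how an options file in this format can be read: the // comments are not valid JSON, so KAIR-style codebases strip them line by line before parsing. The loader and the example path below are illustrative assumptions, not the repository's own code.

import json

def load_options(path):
    """Read a KAIR-style JSON options file, dropping // comments on each line."""
    with open(path, "r") as f:
        # Keep only the part of each line before a // comment marker.
        json_str = "".join(line.split("//")[0] + "\n" for line in f)
    return json.loads(json_str)

opt = load_options("options/test_drunet.json")  # path is illustrative
print(opt["netG"]["net_type"], opt["train"]["G_optimizer_lr"])  # -> drunet 0.0001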