2023-03-18 16:11:22 -07:00
|
|
|
import itertools
import os
import random

import numpy as np
import torch
import torch.utils.data as data

import utils.utils_image as util
|
2023-03-18 16:11:22 -07:00
|
|
|
|
|
|
|
|
|
|
|
class DatasetFFDNet(data.Dataset):
    """
    # -----------------------------------------
    # Get L/H/M for denoising on AWGN with a range of sigma.
    # Only dataroot_H is needed.
    # -----------------------------------------
    # e.g., FFDNet, H = f(L, sigma), sigma is noise level
    # -----------------------------------------
    """

    def __init__(self, opt):
        """Build the dataset from an options dict.

        Args:
            opt: options dict with keys 'n_channels',
                'n_channels_datasetload', 'H_size', 'sigma', 'sigma_test',
                'use_all_patches', 'num_patches_per_image', 'dataroot_H',
                'dataroot_L' and 'phase'.
        """
        super(DatasetFFDNet, self).__init__()
        self.opt = opt
        # `opt.get(k) or default` tolerates both missing keys and keys that
        # are present but set to None/0 in the JSON options.
        self.n_channels = opt.get('n_channels') or 3
        self.n_channels_datasetload = opt.get('n_channels_datasetload') or 3
        self.patch_size = opt.get('H_size') or 64
        self.sigma = opt.get('sigma') or [0, 75]
        self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
        self.sigma_test = opt.get('sigma_test') or 25
        self.use_all_patches = opt.get('use_all_patches') or False
        # When tiling the whole image, derive the tile count from a nominal
        # 1280x1280 source size — TODO confirm images are actually 1280px.
        if self.use_all_patches:
            self.num_patches_per_image = (1280 ** 2) // self.patch_size ** 2
        else:
            self.num_patches_per_image = opt['num_patches_per_image']
        self.num_images = 4  # number of images to use (overfitting experiment)

        # -------------------------------------
        # get the paths of H (ground truth) and L (simulation) images;
        # lists are deliberately truncated to the first `num_images` entries
        # -------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])[:self.num_images]
        self.paths_L = util.get_image_paths(opt['dataroot_L'])[:self.num_images]

        # Repeat every path so that one dataset index yields one patch and
        # each image contributes `num_patches_per_image` patches per epoch.
        if self.opt['phase'] == 'train':
            self.paths_H = list(itertools.chain.from_iterable(
                itertools.repeat(p, self.num_patches_per_image) for p in self.paths_H))
            self.paths_L = list(itertools.chain.from_iterable(
                itertools.repeat(p, self.num_patches_per_image) for p in self.paths_L))

    def __getitem__(self, index):
        """Return a dict with keys 'L', 'H', 'C' (noise level map),
        'L_path' and 'H_path' for the given index."""
        # -------------------------------------
        # get H and L image paths
        # -------------------------------------
        H_path = self.paths_H[index]
        L_path = self.paths_L[index]

        # Paired files must share the same base name (extension ignored).
        # os.path is used so this also works with Windows separators.
        H_name = os.path.splitext(os.path.basename(H_path))[0]
        L_name = os.path.splitext(os.path.basename(L_path))[0]
        assert H_name==L_name, 'Both high and low quality images MUST have same name'

        img_H = util.imread_uint(H_path, self.n_channels_datasetload)
        # The L image stores a complex field: channel 0 = real, channel 1 =
        # imaginary (presumably — the remaining channels are discarded).
        img_L = util.imread_uint(L_path, self.n_channels_datasetload)[:, :, :2]

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H/M patch pairs
            # --------------------------------
            """
            H, W = img_H.shape[:2]

            if self.use_all_patches:
                # ---------------------------------
                # Deterministic tiling: consecutive indices walk the patch
                # grid row by row so the whole image gets covered.
                # ---------------------------------
                # Resets to 0 every time index overflows num_patches.
                img_patch_index = index % self.num_patches_per_image
                # BUGFIX: the grid must be indexed in patch units, not
                # pixels. Dividing by W (pixels) kept h_index at 0 and
                # saturated patch_w, so only the first row was ever used.
                patches_per_row = max(1, W // self.patch_size)
                h_index = img_patch_index // patches_per_row
                w_index = img_patch_index % patches_per_row
                # Clamp so the last patch in a row/column stays in bounds.
                patch_h = min(self.patch_size * h_index, H - self.patch_size)
                patch_w = min(self.patch_size * w_index, W - self.patch_size)
            else:
                # ---------------------------------
                # randomly crop the patch
                # ---------------------------------
                patch_h = random.randint(0, max(0, H - self.patch_size))
                patch_w = random.randint(0, max(0, W - self.patch_size))

            # Ground truth patch: mean over channels (single-channel target).
            patch_H = np.mean(img_H[patch_h:patch_h + self.patch_size,
                                    patch_w:patch_w + self.patch_size, :], axis=2)
            # Input patch from the simulation (keeps both complex channels).
            patch_L = img_L[patch_h:patch_h + self.patch_size,
                            patch_w:patch_w + self.patch_size, :]

            # ---------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # ---------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = util.uint2tensor3(patch_L)

            # ---------------------------------
            # get noise level: uniform in [sigma_min, sigma_max] / 255
            # ---------------------------------
            noise_level = torch.FloatTensor(
                [np.random.uniform(self.sigma_min, self.sigma_max)]) / 255.0

            # ---------------------------------
            # add white Gaussian noise at the sampled level (in place)
            # ---------------------------------
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)
        else:
            """
            # --------------------------------
            # get L/H/sigma image pairs (test phase, full images)
            # --------------------------------
            """
            # Ground truth as mean value of RGB channels, kept as HxWx1.
            img_H = np.mean(img_H, axis=2)[:, :, np.newaxis]
            img_H = util.uint2single(img_H)
            img_L = util.uint2single(img_L)

            # Fixed seed so validation noise is reproducible across runs.
            np.random.seed(seed=0)
            img_L = img_L + np.random.normal(0, self.sigma_test / 255.0, img_L.shape)
            noise_level = torch.FloatTensor([self.sigma_test / 255.0])

            # ---------------------------------
            # L/H image pairs
            # ---------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)

        # Broadcastable 1x1x1 noise-level map expected by FFDNet-style models.
        noise_level = noise_level.unsqueeze(1).unsqueeze(1)

        return {'L': img_L, 'H': img_H, 'C': noise_level, 'L_path': L_path, 'H_path': H_path}

    def __len__(self):
        # One entry per (possibly repeated) H path.
        return len(self.paths_H)
|