Chen Zheng committed
Commit 9c116e0 · 1 Parent(s): 9b644e0
ICCV 2023
Former-commit-id: af824155fb8ed74383da1f2edf27b09dfab3e6fe
- basicsr/data/data_util.py +2 -5
- basicsr/data/paired_image_dataset.py +9 -31
- basicsr/data/single_image_dataset.py +69 -0
- basicsr/models/dat_model.py +127 -0
- datasets/README.md +3 -0
- datasets/single/.gitattributes +1 -0
- datasets/single/test_1.png +3 -0
- datasets/single/test_2.jpg +3 -0
- datasets/single/test_3.jpg +3 -0
- options/Test/test_DAT_2_x2.yml +2 -2
- options/Test/test_DAT_2_x3.yml +2 -2
- options/Test/test_DAT_2_x4.yml +2 -2
- options/Test/test_DAT_S_x2.yml +2 -2
- options/Test/test_DAT_S_x3.yml +2 -2
- options/Test/test_DAT_S_x4.yml +2 -2
- options/Test/test_DAT_x2.yml +2 -2
- options/Test/test_DAT_x3.yml +2 -2
- options/Test/test_DAT_x4.yml +2 -2
- options/Test/test_single_x2.yml +40 -0
- options/Test/test_single_x3.yml +40 -0
- options/Test/test_single_x4.yml +40 -0
- options/Train/train_DAT_2_x2.yml +1 -1
- options/Train/train_DAT_2_x3.yml +1 -1
- options/Train/train_DAT_2_x4.yml +1 -1
- options/Train/train_DAT_S_x2.yml +2 -2
- options/Train/train_DAT_S_x3.yml +1 -1
- options/Train/train_DAT_S_x4.yml +1 -1
- options/Train/train_DAT_x2.yml +1 -1
- options/Train/train_DAT_x3.yml +1 -1
- options/Train/train_DAT_x4.yml +1 -1
basicsr/data/data_util.py
CHANGED
@@ -162,7 +162,7 @@ def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl):
     return paths
 
 
-def paired_paths_from_folder(folders, keys, filename_tmpl, task):
+def paired_paths_from_folder(folders, keys, filename_tmpl):
     """Generate paired paths from folders.
 
     Args:
@@ -190,10 +190,7 @@ def paired_paths_from_folder(folders, keys, filename_tmpl, task):
     paths = []
     for gt_path in gt_paths:
         basename, ext = osp.splitext(osp.basename(gt_path))
-        if task == 'CAR':
-            input_name = f'{filename_tmpl.format(basename)}.jpg'
-        else:
-            input_name = f'{filename_tmpl.format(basename)}{ext}'
+        input_name = f'{filename_tmpl.format(basename)}{ext}'
         input_path = osp.join(input_folder, input_name)
         assert input_name in input_paths, f'{input_name} is not in {input_key}_paths.'
         gt_path = osp.join(gt_folder, gt_path)
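With the `task` branch gone, `paired_paths_from_folder` always derives the LQ filename from the GT basename plus the original extension via `filename_tmpl`. A minimal standalone sketch of that pairing rule (not the BasicSR function itself; the folder names and the `'{}x2'` template are illustrative):

```python
from os import path as osp


def pair_by_template(lq_folder, gt_folder, gt_names, filename_tmpl='{}'):
    """Sketch of the simplified pairing: one LQ path per GT file, same extension."""
    paths = []
    for gt_name in gt_names:
        basename, ext = osp.splitext(osp.basename(gt_name))
        # e.g. filename_tmpl='{}x2' maps '0001.png' -> '0001x2.png'
        lq_name = f'{filename_tmpl.format(basename)}{ext}'
        paths.append({
            'lq_path': osp.join(lq_folder, lq_name),
            'gt_path': osp.join(gt_folder, gt_name),
        })
    return paths


# hypothetical folders and file name, for illustration only
print(pair_by_template('datasets/DF2K/LR_x2', 'datasets/DF2K/HR', ['0001.png'], '{}x2'))
```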
basicsr/data/paired_image_dataset.py
CHANGED
@@ -46,7 +46,6 @@ class PairedImageDataset(data.Dataset):
         self.file_client = None
         self.io_backend_opt = opt['io_backend']
         self.mean = opt['mean'] if 'mean' in opt else None
-        self.task = opt['task'] if 'task' in opt else None
         self.std = opt['std'] if 'std' in opt else None
 
         self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
@@ -63,7 +62,7 @@ class PairedImageDataset(data.Dataset):
             self.paths = paired_paths_from_meta_info_file([self.lq_folder, self.gt_folder], ['lq', 'gt'],
                                                           self.opt['meta_info_file'], self.filename_tmpl)
         else:
-            self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl, self.task)
+            self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl)
 
     def __getitem__(self, index):
         if self.file_client is None:
@@ -72,35 +71,14 @@ class PairedImageDataset(data.Dataset):
         scale = self.opt['scale']
 
         # Load gt and lq images. Dimension order: HWC; channel order: BGR;
-
-
-
-
-
-
-
-
-            img_lq = imfrombytes(img_bytes, flag='grayscale', float32=False)
-            img_gt = np.expand_dims(img_gt, axis=2).astype(np.float32) / 255.
-            img_lq = np.expand_dims(img_lq, axis=2).astype(np.float32) / 255.
-
-        elif self.task == 'Color-DN':
-            gt_path = self.paths[index]['gt_path']
-            lq_path = gt_path
-            img_bytes = self.file_client.get(gt_path, 'gt')
-            img_gt = imfrombytes(img_bytes, float32=True)
-            if self.opt['phase'] != 'train':
-                np.random.seed(seed=0)
-            img_lq = img_gt + np.random.normal(0, self.noise/255., img_gt.shape)
-
-        else:
-            # image range: [0, 1], float32., H W 3
-            gt_path = self.paths[index]['gt_path']
-            img_bytes = self.file_client.get(gt_path, 'gt')
-            img_gt = imfrombytes(img_bytes, float32=True)
-            lq_path = self.paths[index]['lq_path']
-            img_bytes = self.file_client.get(lq_path, 'lq')
-            img_lq = imfrombytes(img_bytes, float32=True)
+
+        # image range: [0, 1], float32., H W 3
+        gt_path = self.paths[index]['gt_path']
+        img_bytes = self.file_client.get(gt_path, 'gt')
+        img_gt = imfrombytes(img_bytes, float32=True)
+        lq_path = self.paths[index]['lq_path']
+        img_bytes = self.file_client.get(lq_path, 'lq')
+        img_lq = imfrombytes(img_bytes, float32=True)
 
         # augmentation for training
         if self.opt['phase'] == 'train':
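After this simplification every sample takes the same path through `__getitem__`: GT and LQ are read as 3-channel BGR images in [0, 1], and the dataset no longer reads a `task` key. An illustrative opt dict for the pared-down loader (the dataroot paths and the template are placeholders, shown only to list the keys that still matter):

```python
# hypothetical config for PairedImageDataset after this commit; paths are placeholders
opt = {
    'phase': 'val',
    'scale': 2,
    'io_backend': {'type': 'disk'},
    'dataroot_gt': 'datasets/Set5/HR',             # assumed layout
    'dataroot_lq': 'datasets/Set5/LR_bicubic/X2',  # assumed layout
    'filename_tmpl': '{}x2',                       # maps '0001.png' -> '0001x2.png'
}
# a 'task' key would now simply be ignored
```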
basicsr/data/single_image_dataset.py
ADDED
@@ -0,0 +1,69 @@
+from os import path as osp
+from torch.utils import data as data
+from torchvision.transforms.functional import normalize
+
+from basicsr.data.data_util import paths_from_lmdb
+from basicsr.utils import FileClient, imfrombytes, img2tensor, scandir
+from basicsr.utils.matlab_functions import rgb2ycbcr
+from basicsr.utils.registry import DATASET_REGISTRY
+
+
+@DATASET_REGISTRY.register()
+class SingleImageDataset(data.Dataset):
+    """Read only lq images in the test phase.
+
+    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc).
+
+    There are two modes:
+    1. 'meta_info_file': Use meta information file to generate paths.
+    2. 'folder': Scan folders to generate paths.
+
+    Args:
+        opt (dict): Config for train datasets. It contains the following keys:
+            dataroot_lq (str): Data root path for lq.
+            meta_info_file (str): Path for meta information file.
+            io_backend (dict): IO backend type and other kwarg.
+    """
+
+    def __init__(self, opt):
+        super(SingleImageDataset, self).__init__()
+        self.opt = opt
+        # file client (io backend)
+        self.file_client = None
+        self.io_backend_opt = opt['io_backend']
+        self.mean = opt['mean'] if 'mean' in opt else None
+        self.std = opt['std'] if 'std' in opt else None
+        self.lq_folder = opt['dataroot_lq']
+
+        if self.io_backend_opt['type'] == 'lmdb':
+            self.io_backend_opt['db_paths'] = [self.lq_folder]
+            self.io_backend_opt['client_keys'] = ['lq']
+            self.paths = paths_from_lmdb(self.lq_folder)
+        elif 'meta_info_file' in self.opt:
+            with open(self.opt['meta_info_file'], 'r') as fin:
+                self.paths = [osp.join(self.lq_folder, line.rstrip().split(' ')[0]) for line in fin]
+        else:
+            self.paths = sorted(list(scandir(self.lq_folder, full_path=True)))
+
+    def __getitem__(self, index):
+        if self.file_client is None:
+            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+        # load lq image
+        lq_path = self.paths[index]
+        img_bytes = self.file_client.get(lq_path, 'lq')
+        img_lq = imfrombytes(img_bytes, float32=True)
+
+        # color space transform
+        if 'color' in self.opt and self.opt['color'] == 'y':
+            img_lq = rgb2ycbcr(img_lq, y_only=True)[..., None]
+
+        # BGR to RGB, HWC to CHW, numpy to tensor
+        img_lq = img2tensor(img_lq, bgr2rgb=True, float32=True)
+        # normalize
+        if self.mean is not None or self.std is not None:
+            normalize(img_lq, self.mean, self.std, inplace=True)
+        return {'lq': img_lq, 'lq_path': lq_path}
+
+    def __len__(self):
+        return len(self.paths)
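For a quick check, the new dataset can be built directly from a dict that mirrors the `test_single_*.yml` options below. BasicSR normally constructs it through its registry and dataloader helpers; the direct call here is just a sketch and assumes a few images sit in `datasets/single`:

```python
from basicsr.data.single_image_dataset import SingleImageDataset

# keys mirror options/Test/test_single_x2.yml
opt = {
    'dataroot_lq': 'datasets/single',
    'io_backend': {'type': 'disk'},
}

dataset = SingleImageDataset(opt)
print(len(dataset), 'image(s) found')

sample = dataset[0]
# 'lq' is a CHW float tensor in [0, 1]; 'lq_path' is the source file path
print(sample['lq'].shape, sample['lq_path'])
```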
basicsr/models/dat_model.py
ADDED
@@ -0,0 +1,127 @@
+import torch
+from torch.nn import functional as F
+
+from basicsr.utils.registry import MODEL_REGISTRY
+from basicsr.models.sr_model import SRModel
+
+
+@MODEL_REGISTRY.register()
+class DATModle(SRModel):
+
+    def test(self):
+        self.use_chop = self.opt['val']['use_chop'] if 'use_chop' in self.opt['val'] else False
+        if not self.use_chop:
+            if hasattr(self, 'net_g_ema'):
+                self.net_g_ema.eval()
+                with torch.no_grad():
+                    self.output = self.net_g_ema(self.lq)
+            else:
+                self.net_g.eval()
+                with torch.no_grad():
+                    self.output = self.net_g(self.lq)
+                self.net_g.train()
+
+        # test by partitioning
+        else:
+            _, C, h, w = self.lq.size()
+            split_token_h = h // 200 + 1  # number of horizontal cut sections
+            split_token_w = w // 200 + 1  # number of vertical cut sections
+
+            patch_size_tmp_h = split_token_h
+            patch_size_tmp_w = split_token_w
+
+            # padding
+            mod_pad_h, mod_pad_w = 0, 0
+            if h % patch_size_tmp_h != 0:
+                mod_pad_h = patch_size_tmp_h - h % patch_size_tmp_h
+            if w % patch_size_tmp_w != 0:
+                mod_pad_w = patch_size_tmp_w - w % patch_size_tmp_w
+
+            img = self.lq
+            img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, :h+mod_pad_h, :]
+            img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, :w+mod_pad_w]
+
+            _, _, H, W = img.size()
+            split_h = H // split_token_h  # height of each partition
+            split_w = W // split_token_w  # width of each partition
+
+            # overlapping
+            shave_h = 16
+            shave_w = 16
+            scale = self.opt.get('scale', 1)
+            ral = H // split_h
+            row = W // split_w
+            slices = []  # list of partition borders
+            for i in range(ral):
+                for j in range(row):
+                    if i == 0 and i == ral - 1:
+                        top = slice(i * split_h, (i + 1) * split_h)
+                    elif i == 0:
+                        top = slice(i*split_h, (i+1)*split_h+shave_h)
+                    elif i == ral - 1:
+                        top = slice(i*split_h-shave_h, (i+1)*split_h)
+                    else:
+                        top = slice(i*split_h-shave_h, (i+1)*split_h+shave_h)
+                    if j == 0 and j == row - 1:
+                        left = slice(j*split_w, (j+1)*split_w)
+                    elif j == 0:
+                        left = slice(j*split_w, (j+1)*split_w+shave_w)
+                    elif j == row - 1:
+                        left = slice(j*split_w-shave_w, (j+1)*split_w)
+                    else:
+                        left = slice(j*split_w-shave_w, (j+1)*split_w+shave_w)
+                    temp = (top, left)
+                    slices.append(temp)
+            img_chops = []  # list of partitions
+            for temp in slices:
+                top, left = temp
+                img_chops.append(img[..., top, left])
+            if hasattr(self, 'net_g_ema'):
+                self.net_g_ema.eval()
+                with torch.no_grad():
+                    outputs = []
+                    for chop in img_chops:
+                        out = self.net_g_ema(chop)  # image processing of each partition
+                        outputs.append(out)
+                    _img = torch.zeros(1, C, H * scale, W * scale)
+                    # merge
+                    for i in range(ral):
+                        for j in range(row):
+                            top = slice(i * split_h * scale, (i + 1) * split_h * scale)
+                            left = slice(j * split_w * scale, (j + 1) * split_w * scale)
+                            if i == 0:
+                                _top = slice(0, split_h * scale)
+                            else:
+                                _top = slice(shave_h*scale, (shave_h+split_h)*scale)
+                            if j == 0:
+                                _left = slice(0, split_w*scale)
+                            else:
+                                _left = slice(shave_w*scale, (shave_w+split_w)*scale)
+                            _img[..., top, left] = outputs[i * row + j][..., _top, _left]
+                    self.output = _img
+            else:
+                self.net_g.eval()
+                with torch.no_grad():
+                    outputs = []
+                    for chop in img_chops:
+                        out = self.net_g(chop)  # image processing of each partition
+                        outputs.append(out)
+                    _img = torch.zeros(1, C, H * scale, W * scale)
+                    # merge
+                    for i in range(ral):
+                        for j in range(row):
+                            top = slice(i * split_h * scale, (i + 1) * split_h * scale)
+                            left = slice(j * split_w * scale, (j + 1) * split_w * scale)
+                            if i == 0:
+                                _top = slice(0, split_h * scale)
+                            else:
+                                _top = slice(shave_h * scale, (shave_h + split_h) * scale)
+                            if j == 0:
+                                _left = slice(0, split_w * scale)
+                            else:
+                                _left = slice(shave_w * scale, (shave_w + split_w) * scale)
+                            _img[..., top, left] = outputs[i * row + j][..., _top, _left]
+                    self.output = _img
+                self.net_g.train()
+            _, _, h, w = self.output.size()
+            self.output = self.output[:, :, 0:h - mod_pad_h * scale, 0:w - mod_pad_w * scale]
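The partitioned branch above mirror-pads the input so it splits evenly into roughly 200-pixel tiles, runs the network on each tile with a 16-pixel overlap (`shave_h`/`shave_w`), and pastes the super-resolved tiles back while discarding the overlapped borders. The following is a compact, self-contained sketch of the same bookkeeping, not the model code itself; `chop_forward`, `fake_net`, and the tile/shave defaults are illustrative, and a plain nearest-neighbour upsampler stands in for `net_g` so the arithmetic can be checked without DAT weights:

```python
import torch
import torch.nn.functional as F


def chop_forward(lq, net, scale=2, tile=200, shave=16):
    """Sketch of the overlap-and-discard tiling used by DATModle.test (simplified)."""
    _, c, h, w = lq.shape
    tok_h, tok_w = h // tile + 1, w // tile + 1   # number of sections per axis
    pad_h = (tok_h - h % tok_h) % tok_h           # mirror-pad so the sections divide evenly
    pad_w = (tok_w - w % tok_w) % tok_w
    img = torch.cat([lq, torch.flip(lq, [2])], 2)[:, :, :h + pad_h, :]
    img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, :w + pad_w]
    _, _, H, W = img.shape
    sh, sw = H // tok_h, W // tok_w               # tile height / width
    out = torch.zeros(1, c, H * scale, W * scale)
    for i in range(tok_h):
        for j in range(tok_w):
            # extract the tile plus a 'shave' border of context on interior edges
            top = slice(max(i * sh - shave, 0), min((i + 1) * sh + shave, H))
            left = slice(max(j * sw - shave, 0), min((j + 1) * sw + shave, W))
            sr = net(img[..., top, left])         # per-tile forward pass
            # keep only the tile interior and paste it into the output canvas
            t0 = 0 if i == 0 else shave * scale
            l0 = 0 if j == 0 else shave * scale
            tile_sr = sr[..., t0:t0 + sh * scale, l0:l0 + sw * scale]
            out[..., i * sh * scale:(i + 1) * sh * scale,
                     j * sw * scale:(j + 1) * sw * scale] = tile_sr
    return out[..., :h * scale, :w * scale]       # drop the mirrored padding


# hypothetical stand-in for net_g: plain nearest-neighbour x2 upsampling
fake_net = lambda x: F.interpolate(x, scale_factor=2, mode='nearest')
print(chop_forward(torch.rand(1, 3, 517, 333), fake_net, scale=2).shape)  # torch.Size([1, 3, 1034, 666])
```

The overlap gives each tile interior full spatial context from its neighbours; only the central `split_h x split_w` region of every tile survives the merge, which avoids visible seams at tile borders.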
datasets/README.md
CHANGED
@@ -2,6 +2,9 @@ For training and testing, the directory structure is as follows:
 
 ```shell
 |-- datasets
+    # test your own imgs
+    |-- single
+
     # train
     |-- DF2K
         |-- HR
datasets/single/.gitattributes
ADDED
@@ -0,0 +1 @@
+*.{png,jpg} filter=lfs diff=lfs merge=lfs -text
datasets/single/test_1.png
ADDED
Git LFS Details
datasets/single/test_2.jpg
ADDED
Git LFS Details
datasets/single/test_3.jpg
ADDED
Git LFS Details
options/Test/test_DAT_2_x2.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_2_x2
-model_type:
+model_type: DATModle
 scale: 2
 num_gpu: 1
 manual_seed: 10
@@ -80,7 +80,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_2_x3.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_2_x3
-model_type:
+model_type: DATModle
 scale: 3
 num_gpu: 1
 manual_seed: 10
@@ -79,7 +79,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_2_x4.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_2_x4
-model_type:
+model_type: DATModle
 scale: 4
 num_gpu: 1
 manual_seed: 10
@@ -80,7 +80,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_S_x2.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_S_x2
-model_type:
+model_type: DATModle
 scale: 2
 num_gpu: 1
 manual_seed: 10
@@ -80,7 +80,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_S_x3.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_S_x3
-model_type:
+model_type: DATModle
 scale: 3
 num_gpu: 1
 manual_seed: 10
@@ -79,7 +79,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_S_x4.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_S_x4
-model_type:
+model_type: DATModle
 scale: 4
 num_gpu: 1
 manual_seed: 10
@@ -80,7 +80,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_x2.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_x2
-model_type:
+model_type: DATModle
 scale: 2
 num_gpu: 1
 manual_seed: 10
@@ -80,7 +80,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_x3.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_x3
-model_type:
+model_type: DATModle
 scale: 3
 num_gpu: 1
 manual_seed: 10
@@ -79,7 +79,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_DAT_x4.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_x4
-model_type:
+model_type: DATModle
 scale: 4
 num_gpu: 1
 manual_seed: 10
@@ -80,7 +80,7 @@ path:
 val:
   save_img: True
   suffix: ~ # add suffix to saved images, if None, use exp name
-  use_chop: False
+  use_chop: False # True to save memory, if img too large
 
   metrics:
     psnr: # metric name, can be arbitrary
options/Test/test_single_x2.yml
ADDED
@@ -0,0 +1,40 @@
+# general settings
+name: test_single_x2
+model_type: DATModle
+scale: 2
+num_gpu: 1
+manual_seed: 10
+
+datasets:
+  test_1: # the 1st test dataset
+    name: Single
+    type: SingleImageDataset
+    dataroot_lq: datasets/single
+    io_backend:
+      type: disk
+
+
+# network structures
+network_g:
+  type: DAT
+  upscale: 2
+  in_chans: 3
+  img_size: 64
+  img_range: 1.
+  split_size: [8,32]
+  depth: [6,6,6,6,6,6]
+  embed_dim: 180
+  num_heads: [6,6,6,6,6,6]
+  expansion_factor: 4
+  resi_connection: '1conv'
+
+# path
+path:
+  pretrain_network_g: experiments/pretrained_models/DAT/DAT_x2.pth
+  strict_load_g: True
+
+# validation settings
+val:
+  save_img: True
+  suffix: 'x2' # add suffix to saved images, if None, use exp name
+  use_chop: False # True to save memory, if img too large
options/Test/test_single_x3.yml
ADDED
@@ -0,0 +1,40 @@
+# general settings
+name: test_single_x3
+model_type: DATModle
+scale: 3
+num_gpu: 1
+manual_seed: 10
+
+datasets:
+  test_1: # the 1st test dataset
+    name: Single
+    type: SingleImageDataset
+    dataroot_lq: datasets/single
+    io_backend:
+      type: disk
+
+
+# network structures
+network_g:
+  type: DAT
+  upscale: 3
+  in_chans: 3
+  img_size: 64
+  img_range: 1.
+  split_size: [8,32]
+  depth: [6,6,6,6,6,6]
+  embed_dim: 180
+  num_heads: [6,6,6,6,6,6]
+  expansion_factor: 4
+  resi_connection: '1conv'
+
+# path
+path:
+  pretrain_network_g: experiments/pretrained_models/DAT/DAT_x3.pth
+  strict_load_g: True
+
+# validation settings
+val:
+  save_img: True
+  suffix: 'x3' # add suffix to saved images, if None, use exp name
+  use_chop: False # True to save memory, if img too large
options/Test/test_single_x4.yml
ADDED
@@ -0,0 +1,40 @@
+# general settings
+name: test_single_x4
+model_type: DATModle
+scale: 4
+num_gpu: 1
+manual_seed: 10
+
+datasets:
+  test_1: # the 1st test dataset
+    name: Single
+    type: SingleImageDataset
+    dataroot_lq: datasets/single
+    io_backend:
+      type: disk
+
+
+# network structures
+network_g:
+  type: DAT
+  upscale: 4
+  in_chans: 3
+  img_size: 64
+  img_range: 1.
+  split_size: [8,32]
+  depth: [6,6,6,6,6,6]
+  embed_dim: 180
+  num_heads: [6,6,6,6,6,6]
+  expansion_factor: 4
+  resi_connection: '1conv'
+
+# path
+path:
+  pretrain_network_g: experiments/pretrained_models/DAT/DAT_x4.pth
+  strict_load_g: True
+
+# validation settings
+val:
+  save_img: True
+  suffix: 'x4' # add suffix to saved images, if None, use exp name
+  use_chop: False # True to save memory, if img too large
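These three configs wire the new pieces together: `SingleImageDataset` reads whatever images sit in `datasets/single`, and `DATModle` runs the forward pass through BasicSR's usual test entry point (e.g. `python basicsr/test.py -opt options/Test/test_single_x2.yml`, assuming the repo keeps the standard BasicSR layout; the launch command itself is not part of this commit). A quick sanity check that the config parses and points at the registered classes, assuming PyYAML is installed (BasicSR's own option parser adds extra handling on top of this):

```python
import yaml  # PyYAML

with open('options/Test/test_single_x2.yml') as f:
    opt = yaml.safe_load(f)

print(opt['model_type'])                         # DATModle (registered in basicsr/models/dat_model.py)
print(opt['datasets']['test_1']['type'])         # SingleImageDataset
print(opt['datasets']['test_1']['dataroot_lq'])  # datasets/single
print(opt['path']['pretrain_network_g'])         # experiments/pretrained_models/DAT/DAT_x2.pth
```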
options/Train/train_DAT_2_x2.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: train_DAT_2_x2
-model_type:
+model_type: DATModle
 scale: 2
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_2_x3.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: train_DAT_2_x3
-model_type:
+model_type: DATModle
 scale: 3
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_2_x4.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_2_x4
-model_type:
+model_type: DATModle
 scale: 4
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_S_x2.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: train_DAT_S_x2
-model_type:
+model_type: DATModle
 scale: 2
 num_gpu: auto
 manual_seed: 10
@@ -24,7 +24,7 @@ datasets:
     # data loader
     use_shuffle: True
    num_worker_per_gpu: 12
-    batch_size_per_gpu:
+    batch_size_per_gpu: 2
     dataset_enlarge_ratio: 100
     prefetch_mode: ~
 
options/Train/train_DAT_S_x3.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: train_DAT_S_x3
-model_type:
+model_type: DATModle
 scale: 3
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_S_x4.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_S_x4
-model_type:
+model_type: DATModle
 scale: 4
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_x2.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: train_DAT_x2
-model_type:
+model_type: DATModle
 scale: 2
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_x3.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: train_DAT_x3
-model_type:
+model_type: DATModle
 scale: 3
 num_gpu: auto
 manual_seed: 10
options/Train/train_DAT_x4.yml
CHANGED
@@ -1,6 +1,6 @@
 # general settings
 name: test_DAT_x4
-model_type:
+model_type: DATModle
 scale: 4
 num_gpu: auto
 manual_seed: 10