# apdrawing/APDrawingGAN2/options/base_options.py
import argparse
import os
from util import util
import torch
import models
import data


class BaseOptions():
    """Command-line options shared by the APDrawingGAN2 training and test scripts."""

    def __init__(self):
        self.initialized = False

    def initialize(self, parser):
        """Register the base (shared) options on the given argparse parser."""
        parser.add_argument('--dataroot', type=str, default='', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size')
        parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD')
        parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG')
        parser.add_argument('--nnG', type=int, default=9, help='specify nblock for resnet_nblocks, ndown for unet_ndown')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--gpu_ids', type=str, default='', help='gpu ids, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
        parser.add_argument('--gpu_ids_p', type=str, default='', help='gpu ids for pretrained auxiliary models, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        parser.add_argument('--model', type=str, default='apdrawing', help='chooses which model to use. [apdrawing | cycle_gan | pix2pix | test | autoencoder]')
        parser.add_argument('--use_local', action='store_true', help='use local part network')
        parser.add_argument('--lm_dir', type=str, default='dataset/landmark/', help='path to facial landmarks')
        parser.add_argument('--nose_ae', action='store_true', help='use nose autoencoder')
        parser.add_argument('--others_ae', action='store_true', help='use autoencoder for eyes and mouth too')
        parser.add_argument('--nose_ae_net', type=str, default='autoencoderfc', help='net for nose autoencoder [autoencoder | autoencoderfc]')
        parser.add_argument('--comb_op', type=int, default=1, help='use min-pooling (1) or max-pooling (0) for overlapping regions')
        parser.add_argument('--hair_local', action='store_true', help='add hair part')
        parser.add_argument('--bg_local', action='store_true', help='use background mask to separate background')
        parser.add_argument('--bg_dir', default='dataset/mask/bg/', type=str, help='choose bg_dir')
        parser.add_argument('--region_enm', type=int, default=0, help='region type for eyes/nose/mouth: 0 for rectangle, 1 for compact mask within rectangle, 2 for mask without rectangle (1 and 2 require compactmask; 0 uses compactmask for the AE)')
        parser.add_argument('--soft_border', type=int, default=0, help='use mask with soft border')
        parser.add_argument('--EYE_H', type=int, default=40, help='height of the local eye region')
        parser.add_argument('--EYE_W', type=int, default=56, help='width of the local eye region')
        parser.add_argument('--NOSE_H', type=int, default=48, help='height of the local nose region')
        parser.add_argument('--NOSE_W', type=int, default=48, help='width of the local nose region')
        parser.add_argument('--MOUTH_H', type=int, default=40, help='height of the local mouth region')
        parser.add_argument('--MOUTH_W', type=int, default=64, help='width of the local mouth region')
        parser.add_argument('--average_pos', action='store_true', help='use average positions in partCombiner')
        parser.add_argument('--combiner_type', type=str, default='combiner', help='choose combiner type')
        parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--auxiliary_root', type=str, default='auxiliary', help='auxiliary model folder')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
        parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
        parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
        parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
        parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
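        # Example (illustrative): with '--suffix {model}_size{loadSize}' and the
        # defaults above, parse() renames the experiment to
        # 'experiment_name_apdrawing_size512' (see the suffix handling in parse()).
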
        # compact mask
        parser.add_argument('--compactmask', action='store_true', help='use compact mask as input and apply to loss')  # "when you calculate the (ae) loss, you should also restrict to nose pixels"
        parser.add_argument('--cmask_dir', type=str, default='dataset/mask/', help='compact mask directory')
        parser.add_argument('--ae_latentno', type=int, default=1024, help='latent space dim for pretrained NOSE AEwithfc')
        parser.add_argument('--ae_latentmo', type=int, default=1024, help='latent space dim for pretrained MOUTH AEwithfc')
        parser.add_argument('--ae_latenteye', type=int, default=1024, help='latent space dim for pretrained EYEL/EYER AEwithfc')
        parser.add_argument('--ae_small', type=int, default=0, help='use latent dim smaller than default 1024 in the 4 AEs')
        # below for autoencoder
        parser.add_argument('--ae_latent', type=int, default=1024, help='latent space dim for autoencoderfc')
        parser.add_argument('--ae_multiple', type=float, default=2, help='multiplier for the number of filters in the ae encoder')
        parser.add_argument('--ae_h', type=int, default=96, help='ae input height')
        parser.add_argument('--ae_w', type=int, default=96, help='ae input width')
        parser.add_argument('--ae_region', type=str, default='nose', help='autoencoder for which region')
        parser.add_argument('--no_ae', action='store_true', help='do not use any autoencoder')

        self.initialized = True
        return parser
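
    # Example invocation (illustrative; the flag values are hypothetical and a
    # test.py entry point following the upstream APDrawingGAN2 layout is assumed):
    #
    #   python test.py --dataroot <image_dir> --name <experiment> \
    #       --model apdrawing --use_local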

    def gather_options(self):
        """Build the final parser: base options first, then the options added or
        overridden by the chosen model and dataset classes, and parse them."""
        # initialize parser with basic options
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)

        # get the basic options
        opt, _ = parser.parse_known_args()

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with the new defaults

        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)

        self.parser = parser
        return parser.parse_args()
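
    # Note: the setters returned by models.get_option_setter / data.get_option_setter
    # are assumed to follow the pix2pix-style convention of a static
    # modify_commandline_options hook, roughly (sketch, not this repo's exact code):
    #
    #     @staticmethod
    #     def modify_commandline_options(parser, is_train):
    #         parser.add_argument('--some_model_flag', ...)   # hypothetical model/dataset flag
    #         parser.set_defaults(norm='batch')               # hypothetical default override
    #         return parser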

    def print_options(self, opt):
        """Print all options and save them to <checkpoints_dir>/<name>/<phase>_opt.txt,
        flagging values that differ from the parser defaults."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
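
    # The printed/saved block has the form (illustrative excerpt; values differing
    # from their parser defaults get a trailing "[default: ...]" note):
    #
    #   ----------------- Options ---------------
    #                  batch_size: 1
    #                    dataroot: dataset/data                  [default: ]
    #   ----------------- End -------------------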

    def parse(self, print=True):
        """Parse the options, apply the option-dependent adjustments below,
        optionally print/save them via print_options, and return the namespace."""
        opt = self.gather_options()

        if opt.use_local:
            opt.loadSize = opt.fineSize
        if opt.region_enm in [1, 2]:
            opt.compactmask = True
        if opt.nose_ae or opt.others_ae:
            opt.compactmask = True
        if opt.ae_latentno < 1024 and opt.ae_latentmo < 1024 and opt.ae_latenteye < 1024:
            opt.ae_small = 1
        opt.isTrain = self.isTrain  # train or test

        # process opt.suffix
        if opt.suffix:
            suffix = '_' + opt.suffix.format(**vars(opt))
            opt.name = opt.name + suffix

        if self.isTrain and opt.pretrain:
            opt.nose_ae = False
            opt.others_ae = False
            opt.compactmask = False
            opt.chamfer_loss = False
        if not self.isTrain and opt.pretrain:
            opt.nose_ae = False
            opt.others_ae = False
            opt.compactmask = False
        if opt.no_ae:
            opt.nose_ae = False
            opt.others_ae = False
            opt.compactmask = False
        if self.isTrain and opt.no_dtremap:
            opt.dt_nonlinear = ''
            opt.lambda_chamfer = 0.1
            opt.lambda_chamfer2 = 0.1
        if self.isTrain and opt.no_dt:
            opt.chamfer_loss = False

        if print:
            self.print_options(opt)
        # force CPU: ignore --gpu_ids / --gpu_ids_p and select no CUDA device
        # (an empty id list is expected to be treated as "no GPU" downstream)
        opt.gpu_ids = []
        opt.gpu_ids_p = []

        self.opt = opt
        return self.opt
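

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). BaseOptions is
# not parsed directly: it sets neither self.isTrain nor a --phase flag, so the
# entry points are assumed to use subclasses following the upstream
# pix2pix / APDrawingGAN layout (train_options.py / test_options.py); the names
# below are hypothetical if this repo differs.
#
#     class TestOptions(BaseOptions):
#         def initialize(self, parser):
#             parser = BaseOptions.initialize(self, parser)
#             parser.add_argument('--phase', type=str, default='test')
#             self.isTrain = False
#             return parser
#
#     opt = TestOptions().parse()   # base + model + dataset options, printed
#                                   # and saved to <checkpoints_dir>/<name>/test_opt.txt
# ---------------------------------------------------------------------------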