Python config.train() Examples
The following are 4 code examples of config.train().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module config, or try the search function.
Example #1
Source File: train.py From disentangling_conditional_gans with MIT License | 4 votes |
def __init__(
    self,
    cur_nimg,                     # Current training progress, measured in real images shown.
    training_set,                 # Dataset object; must expose resolution_log2 (assumed — confirm against caller).
    lod_initial_resolution=4,     # Image resolution used at the beginning.
    lod_training_kimg=600,        # Thousands of real images to show before doubling the resolution.
    lod_transition_kimg=600,      # Thousands of real images to show when fading in new layers.
    minibatch_base=16,            # Maximum minibatch size, divided evenly among GPUs.
    minibatch_dict=None,          # Resolution-specific overrides; defaults to {}.
    max_minibatch_per_gpu=None,   # Resolution-specific maximum minibatch size per GPU; defaults to {}.
    G_lrate_base=0.001,           # Learning rate for the generator.
    G_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    D_lrate_base=0.001,           # Learning rate for the discriminator.
    D_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    tick_kimg_base=160,           # Default interval of progress snapshots.
    tick_kimg_dict=None):         # Resolution-specific overrides; defaults below.
    """Compute the training schedule for the current point in training.

    Derives level-of-detail (lod), output resolution, total minibatch size,
    per-network learning rates, and snapshot interval from cur_nimg.

    Sets: self.kimg, self.lod, self.resolution, self.minibatch,
    self.G_lrate, self.D_lrate, self.tick_kimg.
    """
    # Fix: the original used mutable default arguments ({} and a dict literal).
    # Use None sentinels instead; behavior for all callers is unchanged.
    minibatch_dict = {} if minibatch_dict is None else minibatch_dict
    max_minibatch_per_gpu = {} if max_minibatch_per_gpu is None else max_minibatch_per_gpu
    G_lrate_dict = {} if G_lrate_dict is None else G_lrate_dict
    D_lrate_dict = {} if D_lrate_dict is None else D_lrate_dict
    if tick_kimg_dict is None:
        tick_kimg_dict = {4: 160, 8: 140, 16: 120, 32: 100, 64: 80,
                          128: 60, 256: 40, 512: 20, 1024: 10}

    # Training phase: which grow/fade cycle we are in, and how far into it.
    self.kimg = cur_nimg / 1000.0
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = self.kimg - phase_idx * phase_dur

    # Level-of-detail and resolution. lod decreases toward 0 as training
    # progresses; the fractional part fades in the newly added layers.
    self.lod = training_set.resolution_log2
    self.lod -= np.floor(np.log2(lod_initial_resolution))
    self.lod -= phase_idx
    if lod_transition_kimg > 0:
        self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    self.lod = max(self.lod, 0.0)
    self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))

    # Minibatch size: round down so it divides evenly among GPUs, then clamp
    # by the optional per-GPU limit for this resolution.
    self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
    self.minibatch -= self.minibatch % config.num_gpus
    if self.resolution in max_minibatch_per_gpu:
        self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)

    # Other parameters: per-resolution overrides with global fallbacks.
    self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)
    self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)
    self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)
Example #2
Source File: train.py From transparent_latent_gan with MIT License | 4 votes |
def __init__(
    self,
    cur_nimg,                     # Current training progress, measured in real images shown.
    training_set,                 # Dataset object; must expose resolution_log2 (assumed — confirm against caller).
    lod_initial_resolution=4,     # Image resolution used at the beginning.
    lod_training_kimg=600,        # Thousands of real images to show before doubling the resolution.
    lod_transition_kimg=600,      # Thousands of real images to show when fading in new layers.
    minibatch_base=16,            # Maximum minibatch size, divided evenly among GPUs.
    minibatch_dict=None,          # Resolution-specific overrides; defaults to {}.
    max_minibatch_per_gpu=None,   # Resolution-specific maximum minibatch size per GPU; defaults to {}.
    G_lrate_base=0.001,           # Learning rate for the generator.
    G_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    D_lrate_base=0.001,           # Learning rate for the discriminator.
    D_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    tick_kimg_base=160,           # Default interval of progress snapshots.
    tick_kimg_dict=None):         # Resolution-specific overrides; defaults below.
    """Compute the training schedule for the current point in training.

    Derives level-of-detail (lod), output resolution, total minibatch size,
    per-network learning rates, and snapshot interval from cur_nimg.

    Sets: self.kimg, self.lod, self.resolution, self.minibatch,
    self.G_lrate, self.D_lrate, self.tick_kimg.
    """
    # Fix: the original used mutable default arguments ({} and a dict literal).
    # Use None sentinels instead; behavior for all callers is unchanged.
    minibatch_dict = {} if minibatch_dict is None else minibatch_dict
    max_minibatch_per_gpu = {} if max_minibatch_per_gpu is None else max_minibatch_per_gpu
    G_lrate_dict = {} if G_lrate_dict is None else G_lrate_dict
    D_lrate_dict = {} if D_lrate_dict is None else D_lrate_dict
    if tick_kimg_dict is None:
        tick_kimg_dict = {4: 160, 8: 140, 16: 120, 32: 100, 64: 80,
                          128: 60, 256: 40, 512: 20, 1024: 10}

    # Training phase: which grow/fade cycle we are in, and how far into it.
    self.kimg = cur_nimg / 1000.0
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = self.kimg - phase_idx * phase_dur

    # Level-of-detail and resolution. lod decreases toward 0 as training
    # progresses; the fractional part fades in the newly added layers.
    self.lod = training_set.resolution_log2
    self.lod -= np.floor(np.log2(lod_initial_resolution))
    self.lod -= phase_idx
    if lod_transition_kimg > 0:
        self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    self.lod = max(self.lod, 0.0)
    self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))

    # Minibatch size: round down so it divides evenly among GPUs, then clamp
    # by the optional per-GPU limit for this resolution.
    self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
    self.minibatch -= self.minibatch % config.num_gpus
    if self.resolution in max_minibatch_per_gpu:
        self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)

    # Other parameters: per-resolution overrides with global fallbacks.
    self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)
    self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)
    self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)
Example #3
Source File: train.py From higan with MIT License | 4 votes |
def __init__(
    self,
    cur_nimg,                     # Current training progress, measured in real images shown.
    training_set,                 # Dataset object; must expose resolution_log2 (assumed — confirm against caller).
    lod_initial_resolution=4,     # Image resolution used at the beginning.
    lod_training_kimg=600,        # Thousands of real images to show before doubling the resolution.
    lod_transition_kimg=600,      # Thousands of real images to show when fading in new layers.
    minibatch_base=16,            # Maximum minibatch size, divided evenly among GPUs.
    minibatch_dict=None,          # Resolution-specific overrides; defaults to {}.
    max_minibatch_per_gpu=None,   # Resolution-specific maximum minibatch size per GPU; defaults to {}.
    G_lrate_base=0.001,           # Learning rate for the generator.
    G_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    D_lrate_base=0.001,           # Learning rate for the discriminator.
    D_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    tick_kimg_base=160,           # Default interval of progress snapshots.
    tick_kimg_dict=None):         # Resolution-specific overrides; defaults below.
    """Compute the training schedule for the current point in training.

    Derives level-of-detail (lod), output resolution, total minibatch size,
    per-network learning rates, and snapshot interval from cur_nimg.

    Sets: self.kimg, self.lod, self.resolution, self.minibatch,
    self.G_lrate, self.D_lrate, self.tick_kimg.
    """
    # Fix: the original used mutable default arguments ({} and a dict literal).
    # Use None sentinels instead; behavior for all callers is unchanged.
    minibatch_dict = {} if minibatch_dict is None else minibatch_dict
    max_minibatch_per_gpu = {} if max_minibatch_per_gpu is None else max_minibatch_per_gpu
    G_lrate_dict = {} if G_lrate_dict is None else G_lrate_dict
    D_lrate_dict = {} if D_lrate_dict is None else D_lrate_dict
    if tick_kimg_dict is None:
        tick_kimg_dict = {4: 160, 8: 140, 16: 120, 32: 100, 64: 80,
                          128: 60, 256: 40, 512: 20, 1024: 10}

    # Training phase: which grow/fade cycle we are in, and how far into it.
    self.kimg = cur_nimg / 1000.0
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = self.kimg - phase_idx * phase_dur

    # Level-of-detail and resolution. lod decreases toward 0 as training
    # progresses; the fractional part fades in the newly added layers.
    self.lod = training_set.resolution_log2
    self.lod -= np.floor(np.log2(lod_initial_resolution))
    self.lod -= phase_idx
    if lod_transition_kimg > 0:
        self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    self.lod = max(self.lod, 0.0)
    self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))

    # Minibatch size: round down so it divides evenly among GPUs, then clamp
    # by the optional per-GPU limit for this resolution.
    self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
    self.minibatch -= self.minibatch % config.num_gpus
    if self.resolution in max_minibatch_per_gpu:
        self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)

    # Other parameters: per-resolution overrides with global fallbacks.
    self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)
    self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)
    self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)
Example #4
Source File: train.py From interfacegan with MIT License | 4 votes |
def __init__(
    self,
    cur_nimg,                     # Current training progress, measured in real images shown.
    training_set,                 # Dataset object; must expose resolution_log2 (assumed — confirm against caller).
    lod_initial_resolution=4,     # Image resolution used at the beginning.
    lod_training_kimg=600,        # Thousands of real images to show before doubling the resolution.
    lod_transition_kimg=600,      # Thousands of real images to show when fading in new layers.
    minibatch_base=16,            # Maximum minibatch size, divided evenly among GPUs.
    minibatch_dict=None,          # Resolution-specific overrides; defaults to {}.
    max_minibatch_per_gpu=None,   # Resolution-specific maximum minibatch size per GPU; defaults to {}.
    G_lrate_base=0.001,           # Learning rate for the generator.
    G_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    D_lrate_base=0.001,           # Learning rate for the discriminator.
    D_lrate_dict=None,            # Resolution-specific overrides; defaults to {}.
    tick_kimg_base=160,           # Default interval of progress snapshots.
    tick_kimg_dict=None):         # Resolution-specific overrides; defaults below.
    """Compute the training schedule for the current point in training.

    Derives level-of-detail (lod), output resolution, total minibatch size,
    per-network learning rates, and snapshot interval from cur_nimg.

    Sets: self.kimg, self.lod, self.resolution, self.minibatch,
    self.G_lrate, self.D_lrate, self.tick_kimg.
    """
    # Fix: the original used mutable default arguments ({} and a dict literal).
    # Use None sentinels instead; behavior for all callers is unchanged.
    minibatch_dict = {} if minibatch_dict is None else minibatch_dict
    max_minibatch_per_gpu = {} if max_minibatch_per_gpu is None else max_minibatch_per_gpu
    G_lrate_dict = {} if G_lrate_dict is None else G_lrate_dict
    D_lrate_dict = {} if D_lrate_dict is None else D_lrate_dict
    if tick_kimg_dict is None:
        tick_kimg_dict = {4: 160, 8: 140, 16: 120, 32: 100, 64: 80,
                          128: 60, 256: 40, 512: 20, 1024: 10}

    # Training phase: which grow/fade cycle we are in, and how far into it.
    self.kimg = cur_nimg / 1000.0
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = self.kimg - phase_idx * phase_dur

    # Level-of-detail and resolution. lod decreases toward 0 as training
    # progresses; the fractional part fades in the newly added layers.
    self.lod = training_set.resolution_log2
    self.lod -= np.floor(np.log2(lod_initial_resolution))
    self.lod -= phase_idx
    if lod_transition_kimg > 0:
        self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    self.lod = max(self.lod, 0.0)
    self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))

    # Minibatch size: round down so it divides evenly among GPUs, then clamp
    # by the optional per-GPU limit for this resolution.
    self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
    self.minibatch -= self.minibatch % config.num_gpus
    if self.resolution in max_minibatch_per_gpu:
        self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)

    # Other parameters: per-resolution overrides with global fallbacks.
    self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)
    self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)
    self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)