Python tensorflow.contrib.tpu.python.tpu.tpu_optimizer.CrossShardOptimizer() Examples
The following are 6 code examples of tensorflow.contrib.tpu.python.tpu.tpu_optimizer.CrossShardOptimizer().
You may also want to check out all available functions/classes of the module tensorflow.contrib.tpu.python.tpu.tpu_optimizer.
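Before the examples, here is a minimal hedged sketch of the core pattern (TF 1.x contrib API): whatever tf.train optimizer you build is wrapped in CrossShardOptimizer when running on TPU, so that gradients are aggregated across the TPU shards before the updates are applied. The names make_train_op, lr and use_tpu below are illustrative assumptions, not taken from the examples.

# Minimal sketch (TF 1.x contrib API). `make_train_op`, `lr` and `use_tpu`
# are illustrative names, not part of the examples below.
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer


def make_train_op(loss, lr, use_tpu):
  """Builds a train_op, wrapping the optimizer for TPU when requested."""
  opt = tf.train.GradientDescentOptimizer(lr)
  if use_tpu:
    # Aggregates gradients across TPU shards before applying updates.
    opt = tpu_optimizer.CrossShardOptimizer(opt)
  return opt.minimize(loss, global_step=tf.train.get_or_create_global_step())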
Example #1
Source File: base_estimator.py From yolo_v2 with Apache License 2.0
def get_train_op(self, loss):
  """Creates a training op.

  Args:
    loss: A float32 `Tensor` representing the total training loss.

  Returns:
    train_op: A slim.learning.create_train_op train_op.

  Raises:
    ValueError: If specified optimizer isn't supported.
  """
  # Get variables to train (defined in subclass).
  assert self.variables_to_train

  # Define a learning rate schedule.
  decay_steps = self._config.learning.decay_steps
  decay_factor = self._config.learning.decay_factor
  learning_rate = float(self._config.learning.learning_rate)

  # Define a learning rate schedule.
  global_step = slim.get_or_create_global_step()
  learning_rate = tf.train.exponential_decay(
      learning_rate,
      global_step,
      decay_steps,
      decay_factor,
      staircase=True)

  # Create an optimizer.
  opt_type = self._config.learning.optimizer
  if opt_type == 'adam':
    opt = tf.train.AdamOptimizer(learning_rate)
  elif opt_type == 'momentum':
    opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
  elif opt_type == 'rmsprop':
    opt = tf.train.RMSPropOptimizer(
        learning_rate, momentum=0.9, epsilon=1.0, decay=0.9)
  else:
    raise ValueError('Unsupported optimizer %s' % opt_type)

  if self._config.use_tpu:
    opt = tpu_optimizer.CrossShardOptimizer(opt)

  # Create a training op.
  # train_op = opt.minimize(loss, var_list=self.variables_to_train)

  # Create a training op.
  train_op = slim.learning.create_train_op(
      loss,
      optimizer=opt,
      variables_to_train=self.variables_to_train,
      update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
  return train_op
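The example reads every hyperparameter from self._config; the actual config class is defined elsewhere in the project and is not shown here. Purely as a hedged illustration, a minimal stand-in exposing the fields referenced above (learning.optimizer, learning.learning_rate, learning.decay_steps, learning.decay_factor and use_tpu) could look like the sketch below; the field values are made up.

# Hypothetical stand-in for the `_config` object used by get_train_op above.
# Field names mirror the attributes the example reads; the values are made up.
from types import SimpleNamespace

config = SimpleNamespace(
    use_tpu=True,
    learning=SimpleNamespace(
        optimizer='momentum',   # one of 'adam', 'momentum', 'rmsprop'
        learning_rate=0.1,
        decay_steps=10000,
        decay_factor=0.94))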
Example #2
Source File: base_estimator.py From Gun-Detector with Apache License 2.0
The get_train_op function here is identical to the one shown in Example #1.
Example #3
Source File: base_estimator.py From object_detection_with_tensorflow with MIT License
The get_train_op function here is identical to the one shown in Example #1.
Example #4
Source File: base_estimator.py From g-tensorflow-models with Apache License 2.0
The get_train_op function here is identical to the one shown in Example #1.
Example #5
Source File: base_estimator.py From models with Apache License 2.0
The get_train_op function here is identical to the one shown in Example #1.
Example #6
Source File: base_estimator.py From multilabel-image-classification-tensorflow with MIT License
The get_train_op function here is identical to the one shown in Example #1.
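One closing note on the wrapper itself: in the TF 1.x contrib API, CrossShardOptimizer also accepts a reduction argument (defaulting to a mean across shards). The sketch below is an assumption about that signature rather than something the examples above use; it would keep the raw cross-shard sum instead of the mean.

# Hedged sketch: requesting a SUM reduction instead of the default MEAN.
# The `reduction` argument is assumed from the TF 1.x contrib signature and
# is not used by the examples above.
opt = tpu_optimizer.CrossShardOptimizer(
    tf.train.MomentumOptimizer(0.01, 0.9),
    reduction=tf.losses.Reduction.SUM)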