Python tensorflow.python.ops.clip_ops.clip_by_global_norm() Examples

The following are 22 code examples of tensorflow.python.ops.clip_ops.clip_by_global_norm(), collected from open-source projects. clip_by_global_norm() rescales a list of tensors (typically gradients) so that their combined global norm does not exceed a given threshold, and returns both the clipped tensors and the original global norm. Each example notes its source file, the project it comes from, and that project's license. You may also want to check out the other available functions and classes of the module tensorflow.python.ops.clip_ops.
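Before the project examples, here is a minimal hedged sketch of the call itself. It uses the public alias tf.clip_by_global_norm (which fronts the same clip_ops implementation) and made-up constant tensors in place of real gradients, running eagerly under TensorFlow 2.

import tensorflow as tf

# Two stand-in "gradients"; their global norm is sqrt(3^2 + 4^2 + 6^2 + 8^2) ~ 11.18.
grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]

# Returns the clipped list plus the pre-clip global norm as a scalar tensor.
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)

print(global_norm.numpy())            # ~11.18
print([g.numpy() for g in clipped])   # same directions, joint norm scaled down to 5.0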
Example #1
Source File: composable_model.py    From lambda-packs with MIT License
def get_train_step(self, loss):
    """Returns the ops to run to perform a training step on this estimator.

    Args:
      loss: The loss to use when calculating gradients.

    Returns:
      The ops to run to perform a training step.
    """
    my_vars = self._get_vars()
    if not (self._get_feature_columns() or my_vars):
      return []

    grads = gradients.gradients(loss, my_vars)
    if self._gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
    return [self._get_optimizer().apply_gradients(zip(grads, my_vars))] 
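For readers who want to try the clip-then-apply pattern above outside the estimator class, here is a small self-contained sketch under TF1-style graph execution; the variable, loss, and clip value are illustrative and not taken from the project.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

my_vars = [tf.get_variable("w", initializer=tf.constant([0.5, -0.5]))]
loss = tf.reduce_sum(tf.square(my_vars[0]))
grads = tf.gradients(loss, my_vars)            # plays the role of gradients.gradients(...)
grads, _ = tf.clip_by_global_norm(grads, 1.0)  # second return value is the pre-clip global norm
train_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(zip(grads, my_vars))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)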
Example #2
Source File: composable_model.py    From keras-lambda with MIT License
def get_train_step(self, loss):
    """Returns the ops to run to perform a training step on this estimator.

    Args:
      loss: The loss to use when calculating gradients.

    Returns:
      The ops to run to perform a training step.
    """
    my_vars = self._get_vars()
    if not (self._get_feature_columns() or my_vars):
      return []

    grads = gradients.gradients(loss, my_vars)
    if self._gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
    return [self._get_optimizer().apply_gradients(zip(grads, my_vars))] 
Example #3
Source File: composable_model.py    From auto-alt-text-lambda-api with MIT License
def get_train_step(self, loss):
    """Returns the ops to run to perform a training step on this estimator.

    Args:
      loss: The loss to use when calculating gradients.

    Returns:
      The ops to run to perform a training step.
    """
    my_vars = self._get_vars()
    if not (self._get_feature_columns() or my_vars):
      return []

    grads = gradients.gradients(loss, my_vars)
    if self._gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
    return [self._get_optimizer().apply_gradients(zip(grads, my_vars))] 
Example #4
Source File: composable_model.py    From deep_image_model with Apache License 2.0
def get_train_step(self, loss):
    """Returns the ops to run to perform a training step on this estimator.

    Args:
      loss: The loss to use when calculating gradients.

    Returns:
      The ops to run to perform a training step.
    """
    my_vars = self._get_vars()
    if not (self._get_feature_columns() or my_vars):
      return []

    grads = gradients.gradients(loss, my_vars)
    if self._gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
    return [self._get_optimizer().apply_gradients(zip(grads, my_vars))] 
Example #5
Source File: composable_model.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_label_columns,
               optimizer,
               gradient_clip_norm,
               num_ps_replicas,
               scope):
    """Common initialization for all _ComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Scope for variables created in this model.
    """
    self._num_label_columns = num_label_columns
    self._optimizer = optimizer
    self._gradient_clip_norm = gradient_clip_norm
    self._num_ps_replicas = num_ps_replicas
    self._scope = scope
    self._feature_columns = None 
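To make the "clipping ratio" wording in the docstring above concrete, here is a hedged numeric illustration with made-up values: when the gradients' global norm exceeds gradient_clip_norm, every gradient is scaled by gradient_clip_norm / global_norm; when it does not, the gradients pass through unchanged.

import tensorflow as tf

big = [tf.constant([6.0, 8.0])]               # global norm 10.0, above the limit of 2.0
clipped, norm = tf.clip_by_global_norm(big, 2.0)
print(norm.numpy(), clipped[0].numpy())       # 10.0, [1.2 1.6] -- scaled by 2.0 / 10.0

small = [tf.constant([0.6, 0.8])]             # global norm 1.0, already within the limit
clipped, norm = tf.clip_by_global_norm(small, 2.0)
print(norm.numpy(), clipped[0].numpy())       # 1.0, [0.6 0.8] -- left unchanged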
Example #6
Source File: optimizers.py    From tf-slim with Apache License 2.0
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables)) 
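A hedged usage sketch for the helper above, assuming _clip_gradients_by_norm is in scope (with clip_ops imported from tensorflow.python.ops); the variable, loss, and clip value are hypothetical. It feeds the helper the (gradient, variable) pairs produced by a TF1-style optimizer and then applies the clipped pairs.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

weights = tf.get_variable("weights", initializer=tf.constant([1.0, 2.0]))
loss = tf.reduce_sum(tf.square(weights))
optimizer = tf.train.GradientDescentOptimizer(0.1)

grads_and_vars = optimizer.compute_gradients(loss)          # [(gradient, variable), ...]
clipped_gvs = _clip_gradients_by_norm(grads_and_vars, clip_gradients=1.0)
train_op = optimizer.apply_gradients(clipped_gvs)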
Example #7
Source File: optimizers.py    From keras-lambda with MIT License
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables)) 
Example #8
Source File: composable_model.py    From keras-lambda with MIT License
def __init__(self,
               num_label_columns,
               optimizer=None,
               _joint_weights=False,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes LinearComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      _joint_weights: If True use a single (possibly partitioned) variable
        to store all weights in this model. Faster, but requires that all
        feature columns are sparse and have the 'sum' combiner.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If scope
        is not supplied, it will default to 'linear'.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "linear" if not scope else scope
    super(LinearComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._joint_weights = _joint_weights 
Example #9
Source File: composable_model.py    From keras-lambda with MIT License
def __init__(self,
               num_label_columns,
               optimizer,
               gradient_clip_norm,
               num_ps_replicas,
               scope,
               trainable=True):
    """Common initialization for all _ComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Scope for variables created in this model.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    self._num_label_columns = num_label_columns
    self._optimizer = optimizer
    self._gradient_clip_norm = gradient_clip_norm
    self._num_ps_replicas = num_ps_replicas
    self._scope = scope
    self._trainable = trainable
    self._feature_columns = None 
Example #10
Source File: dynamic_rnn_estimator.py    From deep_image_model with Apache License 2.0
def _process_gradients(self, gradients_vars):
    """Process gradients (e.g. clipping) before applying them to weights."""
    with ops.name_scope('process_gradients'):
      gradients, variables = zip(*gradients_vars)
      if self._gradient_clipping_norm is not None:
        gradients, _ = clip_ops.clip_by_global_norm(
            gradients, self._gradient_clipping_norm)
      return zip(gradients, variables) 
Example #11
Source File: composable_model.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout 
Example #12
Source File: composable_model.py    From deep_image_model with Apache License 2.0
def __init__(self,
               num_label_columns,
               optimizer=None,
               _joint_weights=False,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None):
    """Initializes LinearComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      _joint_weights: If True use a single (possibly partitioned) variable
        to store all weights in this model. Faster, but requires that all
        feature columns are sparse and have the 'sum' combiner.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If scope
        is not supplied, it will default to 'linear'.
    """
    scope = "linear" if not scope else scope
    super(LinearComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope)
    self._joint_weights = _joint_weights 
Example #13
Source File: optimizers.py    From tensornets with MIT License
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables)) 
Example #14
Source File: optimizers.py    From auto-alt-text-lambda-api with MIT License
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables)) 
Example #15
Source File: composable_model.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               num_label_columns,
               optimizer=None,
               _joint_weights=False,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes LinearComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      _joint_weights: If True use a single (possibly partitioned) variable
        to store all weights in this model. Faster, but requires that all
        feature columns are sparse and have the 'sum' combiner.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If scope
        is not supplied, it will default to 'linear'.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "linear" if not scope else scope
    super(LinearComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._joint_weights = _joint_weights 
Example #16
Source File: composable_model.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               num_label_columns,
               optimizer,
               gradient_clip_norm,
               num_ps_replicas,
               scope,
               trainable=True):
    """Common initialization for all _ComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Scope for variables created in this model.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    self._num_label_columns = num_label_columns
    self._optimizer = optimizer
    self._gradient_clip_norm = gradient_clip_norm
    self._num_ps_replicas = num_ps_replicas
    self._scope = scope
    self._trainable = trainable
    self._feature_columns = None 
Example #17
Source File: composable_model.py    From lambda-packs with MIT License
def __init__(self,
               num_label_columns,
               optimizer=None,
               _joint_weights=False,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes LinearComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      _joint_weights: If True use a single (possibly partitioned) variable
        to store all weights in this model. Faster, but requires that all
        feature columns are sparse and have the 'sum' combiner.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If scope
        is not supplied, it will default to 'linear'.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "linear" if not scope else scope
    super(LinearComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._joint_weights = _joint_weights 
Example #18
Source File: composable_model.py    From lambda-packs with MIT License
def __init__(self,
               num_label_columns,
               optimizer,
               gradient_clip_norm,
               num_ps_replicas,
               scope,
               trainable=True):
    """Common initialization for all _ComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Scope for variables created in this model.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    self._num_label_columns = num_label_columns
    self._optimizer = optimizer
    self._gradient_clip_norm = gradient_clip_norm
    self._num_ps_replicas = num_ps_replicas
    self._scope = scope
    self._trainable = trainable
    self._feature_columns = None 
Example #19
Source File: composable_model.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout 
Example #20
Source File: composable_model.py    From lambda-packs with MIT License
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout 
Example #21
Source File: composable_model.py    From keras-lambda with MIT License
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout 
Example #22
Source File: linear.py    From lambda-packs with MIT License
def __init__(self,  # _joint_weights: pylint: disable=invalid-name
               feature_columns,
               head,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               _joint_weights=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearEstimator` object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      head: An instance of _Head class.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      _joint_weights: If True use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires all feature columns are
        sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `LinearEstimator` estimator.

    Raises:
      ValueError: if optimizer is not supported, e.g., SDCAOptimizer
    """
    assert feature_columns
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      raise ValueError("LinearEstimator does not support SDCA optimizer.")

    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": optimizer,
        "gradient_clip_norm": gradient_clip_norm,
        "joint_weights": _joint_weights,
    }
    super(LinearEstimator, self).__init__(
        model_fn=_linear_model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)