Python tensorflow.python.ops.standard_ops.reduce_sum() Examples

The following are 30 code examples of tensorflow.python.ops.standard_ops.reduce_sum(), drawn from open-source projects; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.standard_ops, or try the search function.
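
For orientation, here is a minimal sketch (assuming TensorFlow 1.x graph mode) of calling reduce_sum through standard_ops; it builds the same op as the public tf.reduce_sum:

import tensorflow as tf
from tensorflow.python.ops import standard_ops

x = tf.constant([[1., 2.], [3., 4.]])
total = standard_ops.reduce_sum(x)             # scalar Tensor, evaluates to 10.0
per_row = standard_ops.reduce_sum(x, axis=1)   # Tensor evaluating to [3., 7.]

with tf.Session() as sess:
    print(sess.run([total, per_row]))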
Example #1
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def cosine_similarity(v1, v2):
    """Cosine similarity [-1, 1], `wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`_.

    Parameters
    -----------
    v1, v2 : tensor of [batch_size, n_feature], with the same number of features.

    Returns
    -----------
    a tensor of [batch_size, ]
    """
    try: ## TF1.0
        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
    except: ## TF0.12
        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
    return cost


## Regularization Functions 
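
A minimal usage sketch for cosine_similarity above (the placeholder shapes are assumptions; assumes TensorFlow 1.x):

v1 = tf.placeholder(tf.float32, [None, 128])   # [batch_size, n_feature], shape assumed
v2 = tf.placeholder(tf.float32, [None, 128])
sim = cosine_similarity(v1, v2)                # shape [batch_size], values in [-1, 1]
cos_loss = tf.reduce_mean(1.0 - sim)           # one way to turn similarity into a loss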
Example #2
Source File: cost.py    From deepsleepnet with Apache License 2.0
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the loss over each sample; otherwise (default), use ``tf.reduce_sum``.

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse 
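
A minimal usage sketch for mean_squared_error above (the 2D placeholder shapes are assumptions; assumes TensorFlow 1.x):

y_pred = tf.placeholder(tf.float32, [None, 10])   # [batch_size, n_feature], shape assumed
y_true = tf.placeholder(tf.float32, [None, 10])
mse_sum = mean_squared_error(y_pred, y_true)                  # sum over features, mean over batch
mse_mean = mean_squared_error(y_pred, y_true, is_mean=True)   # mean over features and batch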
Example #3
Source File: control_flow_ops_test.py    From deep_image_model with Apache License 2.0
def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.test_session() as sess:
        inputs = tf.placeholder(dtype=dtype)
        initial_outputs = tf.TensorArray(dtype=dtype, dynamic_size=True,
                                         size=1)
        initial_i = tf.constant(0, dtype=dtypes.int32)

        def Cond(i, _):
          return i < tf.size(inputs)  # pylint: disable=cell-var-from-loop

        def Body(i, outputs):
          x = tf.gather(inputs, i)  # pylint: disable=cell-var-from-loop
          outputs = outputs.write(i, x)
          return i + 1, outputs

        _, outputs = tf.while_loop(Cond, Body, [initial_i, initial_outputs])

        outputs = tf.reduce_sum(outputs.pack())
        r = tf.gradients([outputs], [inputs])[0]
        grad_wr_inputs = ops.convert_to_tensor(r)
        o, grad = sess.run([outputs, grad_wr_inputs],
                           feed_dict={inputs: [1, 3, 2]})
        self.assertEquals(o, 6)
        self.assertAllEqual(grad, [1] * 3) 
Example #4
Source File: control_flow_ops_test.py    From deep_image_model with Apache License 2.0
def testIndexedSlicesGradient(self):
    with ops.Graph().as_default():
      embedding_matrix = tf.get_variable(
          "embedding_matrix", [5, 5],
          initializer=tf.random_normal_initializer())
      def Cond(it, _):
        return it < 5
      def Body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
        cost += tf.reduce_sum(embedding)
        return it + 1, cost
      _, cost = control_flow_ops.while_loop(
          Cond, Body, [tf.constant(0), tf.constant(0.0)])
      optimizer = momentum.MomentumOptimizer(0.1, 0.9)
      train_op = optimizer.minimize(cost)
      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
          sess.run([train_op]) 
Example #5
Source File: cost.py    From deepsleepnet with Apache License 2.0
def cosine_similarity(v1, v2):
    """Cosine similarity [-1, 1], `wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`_.

    Parameters
    -----------
    v1, v2 : tensor of [batch_size, n_feature], with the same number of features.

    Returns
    -----------
    a tensor of [batch_size, ]
    """
    try: ## TF1.0
        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
    except: ## TF0.12
        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
    return cost


## Regularization Functions 
Example #6
Source File: cost.py    From super-resolution-videos with The Unlicense
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the loss over each sample; otherwise (default), use ``tf.reduce_sum``.

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse 
Example #7
Source File: cost.py    From super-resolution-videos with The Unlicense
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse 
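
A minimal usage sketch for normalized_mean_square_error above on 4D image tensors (shapes are assumptions; assumes TensorFlow 1.x). Per sample it computes ||output - target|| / ||target|| and then averages over the batch:

sr_frames = tf.placeholder(tf.float32, [None, 64, 64, 3])   # predicted frames, shape assumed
hr_frames = tf.placeholder(tf.float32, [None, 64, 64, 3])   # ground-truth frames
nmse = normalized_mean_square_error(sr_frames, hr_frames)    # scalar relative error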
Example #8
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse 
Example #9
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the loss over each sample; otherwise (default), use ``tf.reduce_sum``.

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse 
Example #10
Source File: cost.py    From super-resolution-videos with The Unlicense
def cosine_similarity(v1, v2):
    """Cosine similarity [-1, 1], `wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`_.

    Parameters
    -----------
    v1, v2 : tensor of [batch_size, n_feature], with the same number of features.

    Returns
    -----------
    a tensor of [batch_size, ]
    """
    try: ## TF1.0
        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
    except: ## TF0.12
        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
    return cost


## Regularization Functions 
Example #11
Source File: cost.py    From super-resolution-videos with The Unlicense
def dice_hard_coe(output, target, epsilon=1e-10):
    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. the labels are binary.
    The coefficient is in [0, 1]; it is 1 for a perfect match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to the range [0, 1 - epsilon].

    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    output = tf.cast(output > 0.5, dtype=tf.float32)
    target = tf.cast(target > 0.5, dtype=tf.float32)
    inse = tf.reduce_sum( output * target )
    l = tf.reduce_sum( output * output )
    r = tf.reduce_sum( target * target )
    dice = 2 * (inse) / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0-epsilon) 
Example #12
Source File: cost.py    From super-resolution-videos with The Unlicense
def dice_coe(output, target, epsilon=1e-10):
    """Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. the labels are binary.
    The coefficient is in [0, 1]; it is 1 for a perfect match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to the range [0, 1 - epsilon].

    Examples
    ---------
    >>> outputs = tl.act.pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    # inse = tf.reduce_sum( tf.mul(output, target) )
    # l = tf.reduce_sum( tf.mul(output, output) )
    # r = tf.reduce_sum( tf.mul(target, target) )
    inse = tf.reduce_sum( output * target )
    l = tf.reduce_sum( output * output )
    r = tf.reduce_sum( target * target )
    dice = 2 * (inse) / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0-epsilon) 
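
A minimal sketch combining the two dice functions above: the differentiable dice_coe as a training loss and the thresholded dice_hard_coe as an evaluation metric (network, y_ and pixel_wise_softmax are assumed to exist, as in the docstrings):

outputs = pixel_wise_softmax(network.outputs)          # soft predictions in [0, 1]
dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)    # differentiable, used for training
dice_eval = dice_hard_coe(outputs, y_)                 # thresholded at 0.5, evaluation only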
Example #13
Source File: cost.py    From super-resolution-videos with The Unlicense
def cross_entropy_seq(logits, target_seqs, batch_size=None):#, batch_size=1, num_steps=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Fixed Length RNN outputs.

    Parameters
    ----------
    logits : Tensorflow variable
        2D tensor, ``network.outputs``, [batch_size*n_steps (n_examples), number of output units]
    target_seqs : Tensorflow variable
        target : 2D tensor [batch_size, n_steps]; if the number of steps is dynamic, please use ``cross_entropy_seq_with_mask`` instead.
    batch_size : None or int.
        If not None, the returned cost will be divided by batch_size.

    Examples
    --------
    >>> see PTB tutorial for more details
    >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
    >>> targets = tf.placeholder(tf.int32, [batch_size, num_steps])
    >>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
    """
    try: # TF 1.0
        sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
    except:
        sequence_loss_by_example_fn = tf.nn.seq2seq.sequence_loss_by_example

    loss = sequence_loss_by_example_fn(
        [logits],
        [tf.reshape(target_seqs, [-1])],
        [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)])
        # [tf.ones([batch_size * num_steps])])
    cost = tf.reduce_sum(loss) #/ batch_size
    if batch_size is not None:
        cost = cost / batch_size
    return cost 
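
A minimal usage sketch for cross_entropy_seq above in a fixed-length, PTB-style setup (network, targets and batch_size are assumptions):

# network.outputs: [batch_size * num_steps, vocab_size]; targets: [batch_size, num_steps]
cost_total = cross_entropy_seq(network.outputs, targets)                            # summed over all steps
cost_per_seq = cross_entropy_seq(network.outputs, targets, batch_size=batch_size)   # divided by batch_size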
Example #14
Source File: regularizers.py    From keras-lambda with MIT License
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1 
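
A minimal usage sketch for l1_regularizer above (assumes TensorFlow 1.x graph mode; data_loss is a stand-in for a task loss defined elsewhere):

weights = tf.get_variable('w', shape=[784, 10])
l1 = l1_regularizer(scale=0.01)
l1_penalty = l1(weights)                 # 0.01 * reduce_sum(|weights|), a scalar Tensor
total_loss = data_loss + l1_penalty      # data_loss assumed defined elsewhere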
Example #15
Source File: cost.py    From super-resolution-videos with The Unlicense
def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
    """Computes binary cross entropy given `output`.

    For brevity, let `x = output`, `z = target`.  The binary cross entropy loss is

        loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))

    Parameters
    ----------
    output : tensor of type `float32` or `float64`.
    target : tensor of the same type and shape as `output`.
    epsilon : float
        A small value to avoid taking the log of zero.
    name : string
        An optional name to attach to this layer.

    References
    -----------
    - `DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`_
    """
#     from tensorflow.python.framework import ops
#     with ops.op_scope([output, target], name, "bce_loss") as name:
#         output = ops.convert_to_tensor(output, name="preds")
#         target = ops.convert_to_tensor(targets, name="target")
    with tf.name_scope(name):
        return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) +
                              (1. - target) * tf.log(1. - output + epsilon)), axis=1)) 
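
A minimal usage sketch for binary_cross_entropy above (logits and labels are assumed tensors of the same 2D shape; assumes TensorFlow 1.x):

probs = tf.sigmoid(logits)                  # logits: [batch_size, n_outputs], assumed defined elsewhere
bce = binary_cross_entropy(probs, labels)   # labels in {0, 1}, same shape as probs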
Example #16
Source File: cost.py    From deepsleepnet with Apache License 2.0
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Dynamic RNN outputs.

    Parameters
    -----------
    logits : network identity outputs
        2D tensor, ``network.outputs``, [batch_size, number of output units].
    target_seqs : tensor of int, e.g. word IDs.
        [batch_size, ?]
    input_mask : the mask used to compute the loss
        The same size as target_seqs, normally 0s and 1s.
    return_details : boolean
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (reshaped to one vector).

    Examples
    --------
    - see Image Captioning Example.
    """
    targets = tf.reshape(target_seqs, [-1])   # to one vector
    weights = tf.to_float(tf.reshape(input_mask, [-1]))   # to one vector like targets
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
    #losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others

    try: ## TF1.0
        loss = tf.divide(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    except: ## TF0.12
        loss = tf.div(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    if return_details:
        return loss, losses, weights, targets
    else:
        return loss 
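
A minimal usage sketch for cross_entropy_seq_with_mask above on padded sequences (logits, target_seqs and mask are assumptions):

# logits: [batch_size * max_len, vocab_size]; target_seqs and mask: [batch_size, max_len],
# where mask is 1 for real tokens and 0 for padding.
loss = cross_entropy_seq_with_mask(logits, target_seqs, input_mask=mask)
loss, losses, weights, targets = cross_entropy_seq_with_mask(
    logits, target_seqs, input_mask=mask, return_details=True)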
Example #17
Source File: cost.py    From super-resolution-videos with The Unlicense
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Dynamic RNN outputs.

    Parameters
    -----------
    logits : network identity outputs
        2D tensor, ``network.outputs``, [batch_size, number of output units].
    target_seqs : tensor of int, e.g. word IDs.
        [batch_size, ?]
    input_mask : the mask used to compute the loss
        The same size as target_seqs, normally 0s and 1s.
    return_details : boolean
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (reshaped to one vector).

    Examples
    --------
    - see Image Captioning Example.
    """
    targets = tf.reshape(target_seqs, [-1])   # to one vector
    weights = tf.to_float(tf.reshape(input_mask, [-1]))   # to one vector like targets
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
    #losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others

    try: ## TF1.0
        loss = tf.divide(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    except: ## TF0.12
        loss = tf.div(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    if return_details:
        return loss, losses, weights, targets
    else:
        return loss 
Example #18
Source File: cost.py    From deepsleepnet with Apache License 2.0
def iou_coe(output, target, threshold=0.5, epsilon=1e-10):
    """Non-differentiable Intersection over Union, usually be used for evaluating binary image segmentation.
    The coefficient = [0, 1], 1 means totally match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    threshold : float
        The threshold above which a value is counted as true (foreground).
    epsilon : float
        A small value to avoid a zero denominator when both output and target contain nothing.

    Examples
    ---------
    >>> outputs = tl.act.pixel_wise_softmax(network.outputs)
    >>> iou = tl.cost.iou_coe(outputs[:,:,:,0], y_[:,:,:,0])

    Notes
    ------
    - IoU cannot be used as a training loss; people usually use the dice coefficient for training and IoU for evaluation.
    """
    pre = tf.cast(output > threshold, dtype=tf.float32)
    truth = tf.cast(target > threshold, dtype=tf.float32)
    intersection = tf.reduce_sum(pre * truth)
    union = tf.reduce_sum(tf.cast((pre + truth) > threshold, dtype=tf.float32))
    return tf.reduce_sum(intersection) / (tf.reduce_sum(union) + epsilon) 
Example #19
Source File: cost.py    From deepsleepnet with Apache License 2.0
def dice_hard_coe(output, target, epsilon=1e-10):
    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. the labels are binary.
    The coefficient is in [0, 1]; it is 1 for a perfect match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to the range [0, 1 - epsilon].

    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    output = tf.cast(output > 0.5, dtype=tf.float32)
    target = tf.cast(target > 0.5, dtype=tf.float32)
    inse = tf.reduce_sum( output * target )
    l = tf.reduce_sum( output * output )
    r = tf.reduce_sum( target * target )
    dice = 2 * (inse) / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0-epsilon) 
Example #20
Source File: cost.py    From deepsleepnet with Apache License 2.0
def dice_coe(output, target, epsilon=1e-10):
    """Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. the labels are binary.
    The coefficient is in [0, 1]; it is 1 for a perfect match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to the range [0, 1 - epsilon].

    Examples
    ---------
    >>> outputs = tl.act.pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    # inse = tf.reduce_sum( tf.mul(output, target) )
    # l = tf.reduce_sum( tf.mul(output, output) )
    # r = tf.reduce_sum( tf.mul(target, target) )
    inse = tf.reduce_sum( output * target )
    l = tf.reduce_sum( output * output )
    r = tf.reduce_sum( target * target )
    dice = 2 * (inse) / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0-epsilon) 
Example #21
Source File: regularizers.py    From tensornets with MIT License
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1 
Example #22
Source File: regularizers.py    From deep_image_model with Apache License 2.0
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.mul(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1 
Example #23
Source File: regularizers.py    From lambda-packs with MIT License
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1 
Example #24
Source File: regularizers.py    From auto-alt-text-lambda-api with MIT License
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1 
Example #25
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
    """Computes binary cross entropy given `output`.

    For brevity, let `x = output`, `z = target`.  The binary cross entropy loss is

        loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))

    Parameters
    ----------
    output : tensor of type `float32` or `float64`.
    target : tensor of the same type and shape as `output`.
    epsilon : float
        A small value to avoid taking the log of zero.
    name : string
        An optional name to attach to this layer.

    References
    -----------
    - `DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`_
    """
#     from tensorflow.python.framework import ops
#     with ops.op_scope([output, target], name, "bce_loss") as name:
#         output = ops.convert_to_tensor(output, name="preds")
#         target = ops.convert_to_tensor(targets, name="target")
    with tf.name_scope(name):
        return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) +
                              (1. - target) * tf.log(1. - output + epsilon)), axis=1)) 
Example #26
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def dice_coe(output, target, epsilon=1e-10):
    """Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. the labels are binary.
    The coefficient is in [0, 1]; it is 1 for a perfect match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to the range [0, 1 - epsilon].

    Examples
    ---------
    >>> outputs = tl.act.pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    # inse = tf.reduce_sum( tf.mul(output, target) )
    # l = tf.reduce_sum( tf.mul(output, output) )
    # r = tf.reduce_sum( tf.mul(target, target) )
    inse = tf.reduce_sum( output * target )
    l = tf.reduce_sum( output * output )
    r = tf.reduce_sum( target * target )
    dice = 2 * (inse) / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0-epsilon) 
Example #27
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def dice_hard_coe(output, target, epsilon=1e-10):
    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two distributions,
    usually used for binary image segmentation, i.e. the labels are binary.
    The coefficient is in [0, 1]; it is 1 for a perfect match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    epsilon : float
        A small value; the coefficient is clipped to the range [0, 1 - epsilon].

    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `wiki-dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_
    """
    output = tf.cast(output > 0.5, dtype=tf.float32)
    target = tf.cast(target > 0.5, dtype=tf.float32)
    inse = tf.reduce_sum( output * target )
    l = tf.reduce_sum( output * output )
    r = tf.reduce_sum( target * target )
    dice = 2 * (inse) / (l + r)
    if epsilon == 0:
        return dice
    else:
        return tf.clip_by_value(dice, 0, 1.0-epsilon) 
Example #28
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def cross_entropy_seq(logits, target_seqs, batch_size=None):#, batch_size=1, num_steps=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Fixed Length RNN outputs.

    Parameters
    ----------
    logits : Tensorflow variable
        2D tensor, ``network.outputs``, [batch_size*n_steps (n_examples), number of output units]
    target_seqs : Tensorflow variable
        target : 2D tensor [batch_size, n_steps]; if the number of steps is dynamic, please use ``cross_entropy_seq_with_mask`` instead.
    batch_size : None or int.
        If not None, the returned cost will be divided by batch_size.

    Examples
    --------
    >>> see PTB tutorial for more details
    >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
    >>> targets = tf.placeholder(tf.int32, [batch_size, num_steps])
    >>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
    """
    try: # TF 1.0
        sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
    except:
        sequence_loss_by_example_fn = tf.nn.seq2seq.sequence_loss_by_example

    loss = sequence_loss_by_example_fn(
        [logits],
        [tf.reshape(target_seqs, [-1])],
        [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)])
        # [tf.ones([batch_size * num_steps])])
    cost = tf.reduce_sum(loss) #/ batch_size
    if batch_size is not None:
        cost = cost / batch_size
    return cost 
Example #29
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Dynamic RNN outputs.

    Parameters
    -----------
    logits : network identity outputs
        2D tensor, ``network.outputs``, [batch_size, number of output units].
    target_seqs : tensor of int, e.g. word IDs.
        [batch_size, ?]
    input_mask : the mask used to compute the loss
        The same size as target_seqs, normally 0s and 1s.
    return_details : boolean
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (reshaped to one vector).

    Examples
    --------
    - see Image Captioning Example.
    """
    targets = tf.reshape(target_seqs, [-1])   # to one vector
    weights = tf.to_float(tf.reshape(input_mask, [-1]))   # to one vector like targets
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
    #losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others

    try: ## TF1.0
        loss = tf.divide(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    except: ## TF0.12
        loss = tf.div(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    if return_details:
        return loss, losses, weights, targets
    else:
        return loss 
Example #30
Source File: regularizers.py    From tf-slim with Apache License 2.0
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1