Python tensorflow.sub() Examples

The following are 30 code examples of tensorflow.sub(), collected from open-source projects; each example notes its original project and source file. Note that tf.sub() belongs to the pre-1.0 TensorFlow API: it was renamed to tf.subtract() in TensorFlow 1.0, so these snippets will not run unchanged on modern TensorFlow. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
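Before the project examples, here is a minimal sketch of the op in isolation, runnable on the pre-1.0 API these snippets target:

import tensorflow as tf

# tf.sub performs element-wise subtraction with numpy-style broadcasting.
# On TensorFlow >= 1.0 use tf.subtract instead; tf.sub was removed.
a = tf.constant([3.0, 5.0, 7.0])
b = tf.constant([1.0, 2.0, 3.0])
diff = tf.sub(a, b)

with tf.Session() as sess:
    print(sess.run(diff))  # [ 2.  3.  4.]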
Example #1
Source File: DenoisingAutoencoder.py    From Action_Recognition_Zoo with MIT License
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 dropout_probability = 0.95):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.dropout_probability = dropout_probability
        self.keep_prob = tf.placeholder(tf.float32)

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init) 
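A hypothetical training step for this autoencoder. The enclosing class is not shown in the snippet, so MaskingNoiseAutoencoder below is an assumed name; the dropout keep probability is fed at train time:

import numpy as np

model = MaskingNoiseAutoencoder(n_input=784, n_hidden=200)  # assumed class name
batch = np.random.rand(64, 784).astype(np.float32)
cost, _ = model.sess.run(
    [model.cost, model.optimizer],
    feed_dict={model.x: batch, model.keep_prob: model.dropout_probability})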
Example #2
Source File: DenoisingAutoencoder.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 dropout_probability = 0.95):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.dropout_probability = dropout_probability
        self.keep_prob = tf.placeholder(tf.float32)

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #3
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
      x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
      self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
      # Need x > 1
      self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
      n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
    except ImportError as e:
      tf.logging.warn("Cannot test special functions: %s" % str(e)) 
Example #4
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
      x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
      self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
    except ImportError as e:
      tf.logging.warn("Cannot test special functions: %s" % str(e)) 
Example #5
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testInt32Basic(self):
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.true_divide, tf.truediv)
    self._compareBoth(x, y, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.mod, tf.mod)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
    # _compareBoth tests on GPU only for floating point types, so test
    # _MOD for int32 on GPU by calling _compareGpu
    self._compareGpu(x, y, np.mod, _MOD) 
Example #6
Source File: utilities.py    From learn_prox_ops with GNU General Public License v3.0
def tf_mse(a, b, reduction_indices=None, name='mse'):
    """
    Mean squared error for TensorFlow.

    :param a: First input tensor
    :type a: tf.Tensor
    :param b: Second input tensor
    :type b: tf.Tensor
    :param reduction_indices: Dimensions to reduce. If None, all dimensions are reduced.
    :type reduction_indices: List or None
    :param name: Variable scope name
    :type name: String

    :returns: MSE between a and b
    :rtype: tf.Tensor
    """
    with tf.variable_scope(name):
        return tf.reduce_mean(tf.pow(tf.sub(a, b), 2),
                              reduction_indices=reduction_indices) 
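A quick sanity check of tf_mse on two constant tensors:

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[1.0, 0.0], [3.0, 8.0]])
mse = tf_mse(a, b)  # squared errors are 0, 4, 0, 16, so the mean is 5.0

with tf.Session() as sess:
    print(sess.run(mse))  # 5.0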
Example #7
Source File: vgsl_input.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def _ImageProcessing(image_buffer, shape):
  """Convert a PNG string into an input tensor.

  We allow for fixed and variable sizes.
  Does fixed conversion to floats in the range [-1.28, 1.27].
  Args:
    image_buffer: Tensor containing a PNG encoded image.
    shape:          ImageShape with the desired shape of the input.
  Returns:
    image:        Decoded, normalized image in the range [-1.28, 1.27].
  """
  image = tf.image.decode_png(image_buffer, channels=shape.depth)
  image.set_shape([shape.height, shape.width, shape.depth])
  image = tf.cast(image, tf.float32)
  image = tf.sub(image, 128.0)
  image = tf.mul(image, 1 / 100.0)
  return image 
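The documented [-1.28, 1.27] range follows directly from the two ops: a uint8 pixel p becomes (p - 128) / 100. A plain-Python check:

# 0 -> -1.28, 128 -> 0.0, 255 -> 1.27, matching the docstring.
for p in (0, 128, 255):
    print(p, (p - 128) / 100.0)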
Example #8
Source File: DenoisingAutoencoder.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 scale = 0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                self.weights['w1']),
                self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #9
Source File: symbolic_functions.py    From DDRL with Apache License 2.0
def class_balanced_binary_class_cross_entropy(pred, label, name='cross_entropy_loss'):
    """
    The class-balanced cross entropy loss for binary classification,
    as in `Holistically-Nested Edge Detection
    <http://arxiv.org/abs/1504.06375>`_.

    :param pred: size: b x ANYTHING. the predictions in [0,1].
    :param label: size: b x ANYTHING. the ground truth in {0,1}.
    :returns: class-balanced binary classification cross entropy loss
    """
    z = batch_flatten(pred)
    y = tf.cast(batch_flatten(label), tf.float32)

    count_neg = tf.reduce_sum(1. - y)
    count_pos = tf.reduce_sum(y)
    beta = count_neg / (count_neg + count_pos)

    eps = 1e-8
    loss_pos = -beta * tf.reduce_mean(y * tf.log(tf.abs(z) + eps), 1)
    loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(tf.abs(1. - z) + eps), 1)
    cost = tf.sub(loss_pos, loss_neg)
    cost = tf.reduce_mean(cost, name=name)
    return cost 
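A toy numpy illustration of the balancing weight: with mostly-negative labels, beta is large, so the scarce positive pixels are weighted up relative to the abundant negatives.

import numpy as np

y = np.array([0., 0., 0., 1.], dtype=np.float32)
count_neg = np.sum(1. - y)                      # 3.0
count_pos = np.sum(y)                           # 1.0
beta = count_neg / (count_neg + count_pos)
print(beta)  # 0.75: the positive term is weighted 3x the negative term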
Example #10
Source File: LSPModels.py    From deeppose with GNU General Public License v3.0
def _activation_summary(x):
    """Helper to create summaries for activations.
    
    Creates a summary that provides a histogram of activations.
    Creates a summary that measure the sparsity of activations.
    
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % LSPGlobals.TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) 
Example #11
Source File: car_models.py    From Cherry-Autonomous-Racecar with MIT License
def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
        self.y_ = tf.placeholder(tf.float32, [None, 1])
        (self.h_conv1, _) = conv_layer(self.x, conv=(5, 5), stride=2, n_filters=24, use_bias=True)
        (self.h_conv2, _) = conv_layer(self.h_conv1, conv=(5, 5), stride=2, n_filters=36, use_bias=True)
        (self.h_conv3, _) = conv_layer(self.h_conv2, conv=(5, 5), stride=2, n_filters=48, use_bias=True)
        (self.h_conv4, _) = conv_layer(self.h_conv3, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        (self.h_conv5, _) = conv_layer(self.h_conv4, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        self.h_conv5_flat = flattened(self.h_conv5)
        (self.h_fc1_drop, _, _, self.keep_prob_fc1) = fc_layer(x=self.h_conv5_flat, n_neurons=512, activation=tf.nn.relu, use_bias=True, dropout=True)
        (self.h_fc2_drop, _, _, self.keep_prob_fc2) = fc_layer(self.h_fc1_drop, 100, tf.nn.relu, True, True)
        (self.h_fc3_drop, _, _, self.keep_prob_fc3) = fc_layer(self.h_fc2_drop, 50, tf.nn.relu, True, True)
        (self.h_fc4_drop, _, _, self.keep_prob_fc4) = fc_layer(self.h_fc3_drop, 10, tf.nn.relu, True, True)
        W_fc5 = weight_variable([10, 1])
        b_fc5 = bias_variable([1])
        self.y_out = tf.matmul(self.h_fc4_drop, W_fc5) + b_fc5
        self.loss = tf.reduce_mean(tf.abs(tf.sub(self.y_, self.y_out))) 
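The loss above is plain mean absolute error between the target and predicted values (a single regression output per image); the same quantity in numpy:

import numpy as np

y_true = np.array([[0.1], [-0.2]], dtype=np.float32)
y_pred = np.array([[0.0], [0.1]], dtype=np.float32)
print(np.mean(np.abs(y_true - y_pred)))  # (0.1 + 0.3) / 2 = 0.2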
Example #12
Source File: car_models.py    From Cherry-Autonomous-Racecar with MIT License
def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
        self.y_ = tf.placeholder(tf.float32, [None, 1])
        (self.h_conv1, _) = conv_layer(self.x, conv=(5, 5), stride=2, n_filters=24, use_bias=True)
        (self.h_conv2, _) = conv_layer(self.h_conv1, conv=(5, 5), stride=2, n_filters=36, use_bias=True)
        (self.h_conv3, _) = conv_layer(self.h_conv2, conv=(5, 5), stride=2, n_filters=48, use_bias=True)
        (self.h_conv4, _) = conv_layer(self.h_conv3, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        (self.h_conv5, _) = conv_layer(self.h_conv4, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        self.h_conv5_flat = flattened(self.h_conv5)
        (self.h_fc1_drop, _, _, self.keep_prob_fc1) = fc_layer(x=self.h_conv5_flat, n_neurons=512, activation=tf.nn.relu, use_bias=True, dropout=True)
        (self.h_fc2_drop, _, _, self.keep_prob_fc2) = fc_layer(self.h_fc1_drop, 100, tf.nn.relu, True, True)
        (self.h_fc3_drop, _, _, self.keep_prob_fc3) = fc_layer(self.h_fc2_drop, 50, tf.nn.relu, True, True)
        (self.h_fc4_drop, _, _, self.keep_prob_fc4) = fc_layer(self.h_fc3_drop, 10, tf.nn.relu, True, True)
        W_fc5 = weight_variable([10, 1])
        b_fc5 = bias_variable([1])
        self.y_out = tf.matmul(self.h_fc4_drop, W_fc5) + b_fc5
        self.loss = tf.reduce_mean(tf.abs(tf.sub(self.y_, self.y_out))) 
Example #13
Source File: LSPModels.py    From deeppose with GNU General Public License v3.0
def loss(logits, labels):
    """Calculates Mean Pixel Error.
    
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]
    
    Returns:
      Loss tensor of type float.
    """
    
    labelValidity = tf.sign(labels, name='label_validity')
    
    minop = tf.sub(logits, labels, name='Diff_Op')
    
    absop = tf.abs(minop, name='Abs_Op')
    
    lossValues = tf.mul(labelValidity, absop, name='lossValues')
    
    loss_mean = tf.reduce_mean(lossValues, name='MeanPixelError')
    
    tf.add_to_collection('losses', loss_mean)
    
    return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss_mean 
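The masking step in numpy terms: tf.sign(labels) zeroes out the error wherever a label is encoded as 0 (invalid), so only valid joints contribute to the mean pixel error.

import numpy as np

labels = np.array([10., 0., 4.], dtype=np.float32)   # 0 marks an invalid label
logits = np.array([12., 3., 1.], dtype=np.float32)
loss = np.mean(np.sign(labels) * np.abs(logits - labels))
print(loss)  # (2 + 0 + 3) / 3 = 1.666...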
Example #14
Source File: test_computations.py    From ngraph-python with Apache License 2.0
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.mul(c, a)
        e = tf.div(d, b)
        f = tf.sub(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val}) 
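For reference, the chained graph above computes g = maximum(a, a - ((a + b) * a) / b); the same expression in numpy:

import numpy as np

a = np.random.rand(2, 3).astype(np.float32)
b = np.random.rand(2, 3).astype(np.float32)
g = np.maximum(a, a - ((a + b) * a) / b)
print(g.shape)  # (2, 3)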
Example #15
Source File: lenet_preprocessing.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
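Here the normalization is (pixel - 128) / 128, which maps uint8 inputs from [0, 255] into roughly [-1, 1):

for p in (0, 128, 255):
    print(p, (p - 128) / 128.0)  # -1.0, 0.0, 0.9921875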
Example #16
Source File: control_flow_ops_py_test.py    From deep_image_model with Apache License 2.0
def testCondIndexedSlicesDifferentTypes(self):
    with self.test_session():
      values = tf.constant(10)
      i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
      i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
      x = tf.IndexedSlices(values, i_32)
      pred = tf.less(1, 2)
      fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
      fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
      r = tf.cond(pred, fn1, fn2)

      val = r.values.eval()
      ind = r.indices.eval()
    self.assertTrue(check_op_order(x.values.graph))
    self.assertAllEqual(11, val)
    self.assertAllEqual(0, ind)
    self.assertTrue(ind.dtype == np.int64) 
Example #17
Source File: DenoisingAutoencoder.py    From Action_Recognition_Zoo with MIT License
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 scale = 0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                self.weights['w1']),
                self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #18
Source File: vgsl_input.py    From Action_Recognition_Zoo with MIT License
def _ImageProcessing(image_buffer, shape):
  """Convert a PNG string into an input tensor.

  We allow for fixed and variable sizes.
  Does fixed conversion to floats in the range [-1.28, 1.27].
  Args:
    image_buffer: Tensor containing a PNG encoded image.
    shape:          ImageShape with the desired shape of the input.
  Returns:
    image:        Decoded, normalized image in the range [-1.28, 1.27].
  """
  image = tf.image.decode_png(image_buffer, channels=shape.depth)
  image.set_shape([shape.height, shape.width, shape.depth])
  image = tf.cast(image, tf.float32)
  image = tf.sub(image, 128.0)
  image = tf.mul(image, 1 / 100.0)
  return image 
Example #19
Source File: lenet_preprocessing.py    From Action_Recognition_Zoo with MIT License
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Example #20
Source File: load_ops.py    From taskonomy with MIT License
def rescale_images_tf(input_imgs, dtype, scope=None):
    '''Convert input from [0, 1] -> [-1.0, 1.0] floats.
    Args:
        input_imgs: List of input images (scaled between 0 and 1) with the
                    dimensions specified in the cfg
    Return:
        scaled_input_imgs: List of input images after rescaling to [-1, 1]
    '''
    if scope is None:
        cur_scope = 'rescaled'
    else:
        cur_scope = scope
    with tf.variable_scope(cur_scope):
        input_img = tf.cast(input_imgs, dtype,
                            name='cast_input')
        input_img = tf.mul(input_img, 2., name='rescale_input')
        scaled_input_imgs = tf.sub(input_img, 1., name='normalize_input')
    return scaled_input_imgs 
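A minimal check of the function (runnable against the same pre-1.0 API, and assuming the cast fix above): 2x - 1 maps [0, 1] inputs to [-1, 1].

import tensorflow as tf

imgs = tf.constant([[0.0, 0.5, 1.0]])
scaled = rescale_images_tf(imgs, tf.float32)

with tf.Session() as sess:
    print(sess.run(scaled))  # [[-1.  0.  1.]]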
Example #21
Source File: control_flow_ops_py_test.py    From deep_image_model with Apache License 2.0
def testWhileCondGrad_UnknownShape(self):
    with self.test_session() as sess:
      v = tf.placeholder(tf.float32)
      n = tf.convert_to_tensor(100.0, name="n")
      one = tf.convert_to_tensor(1.0, name="one")
      c = lambda x: tf.less(x, n)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: tf.cond(tf.constant(True),
                            lambda: tf.square(x),
                            lambda: tf.sub(x, one))
      # pylint: enable=undefined-variable
      r = tf.while_loop(c, b, [v])
      r = tf.gradients(r, v)[0]
      r = sess.run(r, feed_dict={v: 2.0})
      self.assertAllClose(1024.0, r) 
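Why 1024 is the expected gradient: the loop squares v three times before the condition fails (2 -> 4 -> 16 -> 256), so r = v**8 and dr/dv = 8 * v**7 = 1024 at v = 2.

v = 2.0
r = ((v ** 2) ** 2) ** 2     # 256.0, the loop result
grad = 8 * v ** 7            # 1024.0, matching the assertion
print(r, grad)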
Example #22
Source File: stack_ops_test.py    From deep_image_model with Apache License 2.0
def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      n = tf.constant(0)
      h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

      def c(x):
        return tf.less(x, 10)
      def b(x):
        with tf.control_dependencies([x]):
          a = tf.constant(np.ones(2000), dtype=tf.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with tf.control_dependencies([v]):
          return tf.add(x, 1)
      r = tf.while_loop(c, b, [n])

      v = tf.constant(np.zeros(2000), dtype=tf.float32)
      def c1(x, y):
        return tf.greater(x, 0)
      def b1(x, y):
        nx = tf.sub(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
        return [nx, ny]
      rx, ry = tf.while_loop(c1, b1, [r, v],
                             [r.get_shape(), tensor_shape.unknown_shape()])
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval()) 
Example #23
Source File: nn.py    From dqa-net with Apache License 2.0
def softmax_with_base(shape, base_untiled, x, mask=None, name='sig'):
    if mask is not None:
        x += VERY_SMALL_NUMBER * (1.0 - mask)
    base_shape = shape[:-1] + [1]
    for _ in shape:
        base_untiled = tf.expand_dims(base_untiled, -1)
    base = tf.tile(base_untiled, base_shape)

    c_shape = shape[:-1] + [shape[-1] + 1]
    c = tf.concat(len(shape)-1, [base, x])
    c_flat = tf.reshape(c, [reduce(mul, shape[:-1], 1), c_shape[-1]])
    p_flat = tf.nn.softmax(c_flat)
    p_cat = tf.reshape(p_flat, c_shape)
    s_aug = tf.slice(p_cat, [0 for _ in shape], [i for i in shape[:-1]] + [1])
    s = tf.squeeze(s_aug, [len(shape)-1])
    sig = tf.sub(1.0, s, name="sig")
    p = tf.slice(p_cat, [0 for _ in shape[:-1]] + [1], shape)
    return sig, p 
Example #24
Source File: svm.py    From tensorflow_tmva with GNU General Public License v2.0
def cost(training, classes, inputs, kernel_type="gaussian", C=1, gamma=1):
    """Returns the kernelised cost to be minimised."""
    beta = tf.Variable(tf.zeros([inputs, 1]))
    offset = tf.Variable(tf.zeros([1]))

    if kernel_type == "linear":
        kernel = linear_kernel(training, inputs, training, inputs)
    elif kernel_type == "gaussian":
        kernel = gaussian_kernel(training, inputs, training, inputs, gamma)

    x = tf.reshape(tf.div(tf.matmul(tf.matmul(
        beta, kernel, transpose_a=True), beta), tf.constant([2.0])), [1])
    y = tf.sub(tf.ones([1]), tf.mul(classes, tf.add(
        tf.matmul(kernel, beta, transpose_a=True), offset)))
    z = tf.mul(tf.reduce_sum(tf.reduce_max(
        tf.concat(1, [y, tf.zeros_like(y)]), reduction_indices=1)),
        tf.constant([C], dtype=tf.float32))
    cost = tf.add(x, z)

    return beta, offset, cost 
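The objective assembled above is a quadratic regularizer in the kernel space plus C times the hinge loss; a toy numpy version of the same pieces, with an identity matrix standing in for the kernel:

import numpy as np

K = np.eye(3)                                    # stand-in kernel matrix
beta = np.array([[0.1], [0.2], [0.3]])
classes = np.array([[1.0], [-1.0], [1.0]])
offset, C = 0.0, 1.0
margin = 1.0 - classes * (K.T.dot(beta) + offset)
cost = 0.5 * beta.T.dot(K).dot(beta) + C * np.sum(np.maximum(margin, 0.0))
print(cost)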
Example #25
Source File: clock_model.py    From deep-time-reading with MIT License
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measure the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) 
Example #26
Source File: symbolic_functions.py    From VDAIC2017 with MIT License
def class_balanced_binary_class_cross_entropy(pred, label, name='cross_entropy_loss'):
    """
    The class-balanced cross entropy loss for binary classification,
    as in `Holistically-Nested Edge Detection
    <http://arxiv.org/abs/1504.06375>`_.

    :param pred: size: b x ANYTHING. the predictions in [0,1].
    :param label: size: b x ANYTHING. the ground truth in {0,1}.
    :returns: class-balanced binary classification cross entropy loss
    """
    z = batch_flatten(pred)
    y = tf.cast(batch_flatten(label), tf.float32)

    count_neg = tf.reduce_sum(1. - y)
    count_pos = tf.reduce_sum(y)
    beta = count_neg / (count_neg + count_pos)

    eps = 1e-8
    loss_pos = -beta * tf.reduce_mean(y * tf.log(tf.abs(z) + eps), 1)
    loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(tf.abs(1. - z) + eps), 1)
    cost = tf.sub(loss_pos, loss_neg)
    cost = tf.reduce_mean(cost, name=name)
    return cost 
Example #27
Source File: baseline.py    From hyperstar with MIT License
def __init__(self, x_size, y_size, w_stddev, **kwargs):
        self.x_size = x_size
        self.y_size = y_size
        self.w_stddev = w_stddev

        self.X = tf.placeholder(tf.float32, shape=[None, self.x_size], name='X')
        self.Y = tf.placeholder(tf.float32, shape=[None, self.y_size], name='Y')
        self.Z = tf.placeholder(tf.float32, shape=[None, self.x_size], name='Z')
        self.W = tf.Variable(tf.random_normal((self.x_size, self.y_size), stddev=self.w_stddev), name='W')

        self.Y_hat = tf.matmul(self.X, self.W)
        self.Y_error = tf.sub(self.Y_hat, self.Y)
        self.Y_norm = self.l2_norm(self.Y_error)

        self.Y_loss = tf.nn.l2_loss(self.Y_norm)

        self.loss = self.Y_loss 
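The core quantity here is the projection error E = XW - Y; a toy numpy version of the same computation (l2_norm is a helper defined elsewhere in the project):

import numpy as np

X = np.random.rand(4, 3)
W = np.random.rand(3, 2)
Y = np.random.rand(4, 2)
E = X.dot(W) - Y
print(np.linalg.norm(E, axis=1))  # per-row L2 norms of the error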
Example #28
Source File: MM2CA.py    From MultimodalDeepLearning with MIT License
def calculatCA(_tp1, _tp2, size, _b_size):
        first = True
        tp1 = tf.split(0, _b_size, _tp1)
        tp2 = tf.split(0, _b_size, _tp2)
        for i in range(_b_size):
            input1 = tf.reshape(tp1[i], shape=[size, 1])
            input2 = tf.reshape(tp2[i], shape=[size, 1])

            upper = tf.matmul(tf.transpose(tf.sub(input1, tf.reduce_mean(input1))), tf.sub(input2, tf.reduce_mean(input2)))        
            _tp1 = tf.reduce_sum(tf.mul(tf.sub(input1, tf.reduce_mean(input1)), tf.sub(input1, tf.reduce_mean(input1))))
            _tp2 = tf.reduce_sum(tf.mul(tf.sub(input2, tf.reduce_mean(input2)), tf.sub(input2, tf.reduce_mean(input2))))
            down = tf.sqrt(tf.mul(_tp1, _tp2))
            factor = tf.abs(tf.div(upper, down))
            
            if first:
                output = factor
                first = False
            else:
                output = tf.concat(1, [output, factor])

        return tf.transpose(output)
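What the loop computes for each batch row is the absolute Pearson correlation between the two size-dimensional vectors; a single-row numpy equivalent:

import numpy as np

v1 = np.random.rand(8)
v2 = np.random.rand(8)
upper = np.dot(v1 - v1.mean(), v2 - v2.mean())
down = np.sqrt(np.sum((v1 - v1.mean()) ** 2) * np.sum((v2 - v2.mean()) ** 2))
print(abs(upper / down))  # equals abs(np.corrcoef(v1, v2)[0, 1])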
    
Example #29
Source File: MM1CA.py    From MultimodalDeepLearning with MIT License
def calculatCA(_tp1, _tp2, size, _b_size):
        first = True
        tp1 = tf.split(0, _b_size, _tp1)
        tp2 = tf.split(0, _b_size, _tp2)
        for i in range(_b_size):
            input1 = tf.reshape(tp1[i], shape=[size, 1])
            input2 = tf.reshape(tp2[i], shape=[size, 1])

            upper = tf.matmul(tf.transpose(tf.sub(input1, tf.reduce_mean(input1))), tf.sub(input2, tf.reduce_mean(input2)))        
            _tp1 = tf.reduce_sum(tf.mul(tf.sub(input1, tf.reduce_mean(input1)), tf.sub(input1, tf.reduce_mean(input1))))
            _tp2 = tf.reduce_sum(tf.mul(tf.sub(input2, tf.reduce_mean(input2)), tf.sub(input2, tf.reduce_mean(input2))))
            down = tf.sqrt(tf.mul(_tp1, _tp2))
            factor = tf.abs(tf.div(upper, down))
            
            if first:
                output = factor
                first = False
            else:
                output = tf.concat(1, [output, factor])

        return tf.transpose(output)
    
Example #30
Source File: MM_RDN_1CA.py    From MultimodalDeepLearning with MIT License
def calculatCA(_tp1, _tp2, size, _b_size):
        first = True
        tp1 = tf.split(0, _b_size, _tp1)
        tp2 = tf.split(0, _b_size, _tp2)
        for i in range(_b_size):
            input1 = tf.reshape(tp1[i], shape=[size, 1])
            input2 = tf.reshape(tp2[i], shape=[size, 1])

            upper = tf.matmul(tf.transpose(tf.sub(input1, tf.reduce_mean(input1))), tf.sub(input2, tf.reduce_mean(input2)))        
            _tp1 = tf.reduce_sum(tf.mul(tf.sub(input1, tf.reduce_mean(input1)), tf.sub(input1, tf.reduce_mean(input1))))
            _tp2 = tf.reduce_sum(tf.mul(tf.sub(input2, tf.reduce_mean(input2)), tf.sub(input2, tf.reduce_mean(input2))))
            down = tf.sqrt(tf.mul(_tp1, _tp2))
            factor = tf.abs(tf.div(upper, down))
            
            if first:
                output = factor
                first = False
            else:
                output = tf.concat(1, [output, factor])

        return tf.transpose(output)
