Python tensorflow.subtract() Examples

The following are 30 code examples of tensorflow.subtract(). Each example is taken from an open-source project; the source file and project are noted above it. You may also want to check out all other available functions and classes of the tensorflow module.
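As a minimal sketch of the function itself (TensorFlow 2.x eager execution assumed here; under 1.x the tensor would be evaluated inside a session):

import tensorflow as tf

a = tf.constant([3.0, 5.0, 7.0])
b = tf.constant([1.0, 2.0, 3.0])
diff = tf.subtract(a, b)  # element-wise a - b -> [2.0, 3.0, 4.0]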
Example #1
Source File: preprocessor.py    From vehicle_counting_tensorflow with MIT License
def _flip_boxes_up_down(boxes):
  """Up-down flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_ymin = tf.subtract(1.0, ymax)
  flipped_ymax = tf.subtract(1.0, ymin)
  flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
  return flipped_boxes 
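A hypothetical invocation (box coordinates invented for illustration):

boxes = tf.constant([[0.1, 0.2, 0.4, 0.5]])  # [ymin, xmin, ymax, xmax]
flipped = _flip_boxes_up_down(boxes)
# ymin becomes 1 - 0.4 = 0.6 and ymax becomes 1 - 0.1 = 0.9,
# so the result is [[0.6, 0.2, 0.9, 0.5]]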
Example #2
Source File: lenet_preprocessing.py    From STORK with MIT License
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
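The subtract/divide pair maps raw pixel values from [0, 255] into [-1, 1); a quick check with made-up values (using the same TF 1.x ops as the example):

pixels = tf.constant([0.0, 128.0, 255.0])
normalized = tf.div(tf.subtract(pixels, 128.0), 128.0)
# -> [-1.0, 0.0, 0.9921875]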
Example #3
Source File: layers.py    From Pixel2MeshPlusPlus with BSD 3-Clause "New" or "Revised" License
def bi_linear_sample(self, img_feat, n, x, y):
        x1 = tf.floor(x)
        x2 = tf.ceil(x)
        y1 = tf.floor(y)
        y2 = tf.ceil(y)
        Q11 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y1, tf.int32)], 1))
        Q12 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x1, tf.int32), tf.cast(y2, tf.int32)], 1))
        Q21 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y1, tf.int32)], 1))
        Q22 = tf.gather_nd(img_feat, tf.stack([n, tf.cast(x2, tf.int32), tf.cast(y2, tf.int32)], 1))

        weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y2, y))
        Q11 = tf.multiply(tf.expand_dims(weights, 1), Q11)
        weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y2, y))
        Q21 = tf.multiply(tf.expand_dims(weights, 1), Q21)
        weights = tf.multiply(tf.subtract(x2, x), tf.subtract(y, y1))
        Q12 = tf.multiply(tf.expand_dims(weights, 1), Q12)
        weights = tf.multiply(tf.subtract(x, x1), tf.subtract(y, y1))
        Q22 = tf.multiply(tf.expand_dims(weights, 1), Q22)
        outputs = tf.add_n([Q11, Q21, Q12, Q22])
        return outputs 
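The four weight terms are the standard bilinear interpolation coefficients; for fractional coordinates (where floor and ceil differ by 1) they sum to 1. A made-up numeric check:

# For x = 1.25, y = 2.75: x1 = 1, x2 = 2, y1 = 2, y2 = 3
# weight(Q11) = (2 - 1.25) * (3 - 2.75) = 0.1875
# weight(Q21) = (1.25 - 1) * (3 - 2.75) = 0.0625
# weight(Q12) = (2 - 1.25) * (2.75 - 2) = 0.5625
# weight(Q22) = (1.25 - 1) * (2.75 - 2) = 0.1875
# sum = 1.0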
Example #4
Source File: tf_io_pipline_tools.py    From lanenet-lane-detection with Apache License 2.0
def normalize(gt_image, gt_binary_image, gt_instance_image):
    """
    Normalize the image data by scaling it to the range [-1, 1]
    :param gt_image:
    :param gt_binary_image:
    :param gt_instance_image:
    :return:
    """

    if gt_image.get_shape().as_list()[-1] != 3 \
            or gt_binary_image.get_shape().as_list()[-1] != 1 \
            or gt_instance_image.get_shape().as_list()[-1] != 1:
        log.error(gt_image.get_shape())
        log.error(gt_binary_image.get_shape())
        log.error(gt_instance_image.get_shape())
        raise ValueError('Input must be of size [height, width, C>0]')

    gt_image = tf.cast(gt_image, dtype=tf.float32)
    gt_image = tf.subtract(tf.divide(gt_image, tf.constant(127.5, dtype=tf.float32)),
                           tf.constant(1.0, dtype=tf.float32))

    return gt_image, gt_binary_image, gt_instance_image 
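A hypothetical call (TF 1.x assumed; shapes invented for illustration):

gt = tf.random.uniform([256, 512, 3], maxval=255.0)
binary = tf.zeros([256, 512, 1])
instance = tf.zeros([256, 512, 1])
gt_norm, _, _ = normalize(gt, binary, instance)  # gt_norm values lie in [-1, 1]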
Example #5
Source File: dqn.py    From reinforcement_learning with MIT License
def _build_qnet(self):
    """
    Build q-network
    """
    with tf.variable_scope(self.scope):
      self.state_input = tf.placeholder(tf.float32, [None, self.state_size])
      self.action = tf.placeholder(tf.int32, [None])
      self.target_q = tf.placeholder(tf.float32, [None])

      fc1 = tf_utils.fc(self.state_input, n_output=self.n_hidden_1, activation_fn=tf.nn.relu)
      fc2 = tf_utils.fc(fc1, n_output=self.n_hidden_2, activation_fn=tf.nn.relu)
      self.q_values = tf_utils.fc(fc2, self.action_size, activation_fn=None)

      action_mask = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
      q_value_pred = tf.reduce_sum(self.q_values * action_mask, 1)

      self.loss = tf.reduce_mean(tf.square(tf.subtract(self.target_q, q_value_pred)))
      self.optimizer = tf.train.AdamOptimizer(self.lr)
      self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step()) 
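The masking step keeps only the Q-value of the action actually taken; a toy check with invented numbers:

q_values = tf.constant([[1.0, 2.0, 3.0]])
action_mask = tf.one_hot([2], 3, 1.0, 0.0)               # selects action 2
q_value_pred = tf.reduce_sum(q_values * action_mask, 1)  # -> [3.0]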
Example #6
Source File: read_tfrecord.py    From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def _extract_features_batch(self, serialized_batch):
        features = tf.parse_example(
            serialized_batch,
            features={'images': tf.FixedLenFeature([], tf.string),
                'imagepaths': tf.FixedLenFeature([], tf.string),
                'labels': tf.VarLenFeature(tf.int64),
                 })

        bs = features['images'].shape[0]
        images = tf.decode_raw(features['images'], tf.uint8)
        w, h = tuple(CFG.ARCH.INPUT_SIZE)
        images = tf.cast(x=images, dtype=tf.float32)
        #images = tf.subtract(tf.divide(images, 128.0), 1.0)
        images = tf.reshape(images, [bs, h, -1, CFG.ARCH.INPUT_CHANNELS])

        labels = features['labels']
        labels = tf.cast(labels, tf.int32)

        imagepaths = features['imagepaths']

        return images, labels, imagepaths 
Example #7
Source File: preprocessor.py    From object_detector_app with MIT License
def flip_boxes(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  # Flip boxes.
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_xmin = tf.subtract(1.0, xmax)
  flipped_xmax = tf.subtract(1.0, xmin)
  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
  return flipped_boxes 
Example #8
Source File: sparse_covariance.py    From tf-example-models with Apache License 2.0
def get_value_updater(self, data, new_mean, gamma_weighted, gamma_sum):
        tf_new_differences = tf.subtract(data, tf.expand_dims(new_mean, 0))
        tf_sq_dist_matrix = tf.matmul(tf.expand_dims(tf_new_differences, 2), tf.expand_dims(tf_new_differences, 1))
        tf_new_covariance = tf.reduce_sum(tf_sq_dist_matrix * tf.expand_dims(tf.expand_dims(gamma_weighted, 1), 2), 0)

        if self.has_prior:
            tf_new_covariance = self.get_prior_adjustment(tf_new_covariance, gamma_sum)

        tf_s, tf_u, _ = tf.svd(tf_new_covariance)

        tf_required_eigvals = tf_s[:self.rank]
        tf_required_eigvecs = tf_u[:, :self.rank]

        tf_new_baseline = (tf.trace(tf_new_covariance) - tf.reduce_sum(tf_required_eigvals)) / self.tf_rest
        tf_new_eigvals = tf_required_eigvals - tf_new_baseline
        tf_new_eigvecs = tf.transpose(tf_required_eigvecs)

        return tf.group(
            self.tf_baseline.assign(tf_new_baseline),
            self.tf_eigvals.assign(tf_new_eigvals),
            self.tf_eigvecs.assign(tf_new_eigvecs)
        ) 
Example #9
Source File: DenoisingAutoencoder.py    From DOTA_models with Apache License 2.0
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 dropout_probability = 0.95):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.dropout_probability = dropout_probability
        self.keep_prob = tf.placeholder(tf.float32)

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
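A hypothetical training step (the class name is assumed from the file name, and the MNIST-sized input is invented for illustration):

import numpy as np

ae = DenoisingAutoencoder(n_input=784, n_hidden=200)
batch = np.random.rand(64, 784).astype(np.float32)
cost, _ = ae.sess.run([ae.cost, ae.optimizer],
                      feed_dict={ae.x: batch, ae.keep_prob: ae.dropout_probability})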
Example #10
Source File: DenoisingAutoencoder.py    From DOTA_models with Apache License 2.0
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 scale = 0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                self.weights['w1']),
                self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #11
Source File: lenet_preprocessing.py    From ctw-baseline with MIT License
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Example #12
Source File: preprocessor.py    From DOTA_models with Apache License 2.0
def flip_boxes(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  # Flip boxes.
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_xmin = tf.subtract(1.0, xmax)
  flipped_xmax = tf.subtract(1.0, xmin)
  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
  return flipped_boxes 
Example #13
Source File: preprocessor.py    From vehicle_counting_tensorflow with MIT License
def _flip_boxes_left_right(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_xmin = tf.subtract(1.0, xmax)
  flipped_xmax = tf.subtract(1.0, xmin)
  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
  return flipped_boxes 
Example #14
Source File: lenet_preprocessing.py    From DOTA_models with Apache License 2.0
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Example #15
Source File: facenet.py    From TNT with GNU General Public License v3.0
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper
    
    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin enforced between the positive and negative distances.
  
    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        
        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
      
    return loss 
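A hypothetical call with randomly generated, L2-normalized embeddings (TF 1.x assumed):

anchor = tf.nn.l2_normalize(tf.random.normal([32, 128]), 1)
positive = tf.nn.l2_normalize(tf.random.normal([32, 128]), 1)
negative = tf.nn.l2_normalize(tf.random.normal([32, 128]), 1)
loss = triplet_loss(anchor, positive, negative, alpha=0.2)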
Example #16
Source File: lenet_preprocessing.py    From tf-pose with Apache License 2.0
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Example #17
Source File: model.py    From minimal-entropy-correlation-alignment with MIT License
def log_coral_loss(self, h_src, h_trg, gamma=1e-3):
    # Regularized covariances can result in inf or nan.
    # First: subtract the mean from the data matrix.
    batch_size = tf.to_float(tf.shape(h_src)[0])
    h_src = h_src - tf.reduce_mean(h_src, axis=0)
    h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
    cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    # Eigendecomposition of both covariance matrices.
    eig_source = tf.self_adjoint_eig(cov_source)
    eig_target = tf.self_adjoint_eig(cov_target)
    log_cov_source = tf.matmul(eig_source[1], tf.matmul(tf.diag(tf.log(eig_source[0])), eig_source[1], transpose_b=True))
    log_cov_target = tf.matmul(eig_target[1], tf.matmul(tf.diag(tf.log(eig_target[0])), eig_target[1], transpose_b=True))

    # Mean squared entry-wise difference, i.e. the squared Frobenius norm scaled by 1/d^2.
    return tf.reduce_mean(tf.square(tf.subtract(log_cov_source, log_cov_target)))
    # ~ return tf.reduce_mean(tf.reduce_max(eig_target[0]))
    # ~ return tf.to_float(tf.equal(tf.count_nonzero(h_src), tf.count_nonzero(h_src)))
Example #18
Source File: preprocessor.py    From vehicle_counting_tensorflow with MIT License
def _rot90_boxes(boxes):
  """Rotate boxes counter-clockwise by 90 degrees.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Rotated boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  rotated_ymin = tf.subtract(1.0, xmax)
  rotated_ymax = tf.subtract(1.0, xmin)
  rotated_xmin = ymin
  rotated_xmax = ymax
  rotated_boxes = tf.concat(
      [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)
  return rotated_boxes 
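A numeric check with an invented box (same convention as above):

boxes = tf.constant([[0.1, 0.2, 0.4, 0.5]])  # [ymin, xmin, ymax, xmax]
rotated = _rot90_boxes(boxes)
# rotated_ymin = 1 - 0.5 = 0.5, rotated_ymax = 1 - 0.2 = 0.8,
# so the result is [[0.5, 0.1, 0.8, 0.4]]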
Example #19
Source File: utils.py    From Tensorflow-Cookbook with MIT License
def orthogonal_regularizer_fully(scale):
    """ Define the orthogonal regularizer and return the inner function to be used as a fully connected layer's kernel regularizer """

    def ortho_reg_fully(w):
        """ The fully connected weight is already a 2D tensor, so it can be used directly for enforcing orthogonality """
        _, c = w.get_shape().as_list()

        """ Declare an identity tensor of appropriate size """
        identity = tf.eye(c)
        w_transpose = tf.transpose(w)
        w_mul = tf.matmul(w_transpose, w)
        reg = tf.subtract(w_mul, identity)

        """ Calculate the loss """
        ortho_loss = tf.nn.l2_loss(reg)

        return scale * ortho_loss

    return ortho_reg_fully 
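A hypothetical use as a kernel regularizer (TF 1.x layers API; names invented):

inputs = tf.random.normal([8, 64])
reg = orthogonal_regularizer_fully(scale=1e-4)
logits = tf.layers.dense(inputs, units=10, kernel_regularizer=reg)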
Example #20
Source File: utils.py    From Tensorflow-Cookbook with MIT License
def orthogonal_regularizer(scale):
    """ Define the orthogonal regularizer and return the inner function to be used as a conv layer's kernel regularizer """

    def ortho_reg(w):
        """ Reshape the kernel into a 2D tensor for enforcing orthogonality """
        _, _, _, c = w.get_shape().as_list()

        w = tf.reshape(w, [-1, c])

        """ Declare an identity tensor of appropriate size """
        identity = tf.eye(c)

        """ Regularizer: W^T * W - I """
        w_transpose = tf.transpose(w)
        w_mul = tf.matmul(w_transpose, w)
        reg = tf.subtract(w_mul, identity)

        """ Calculate the loss """
        ortho_loss = tf.nn.l2_loss(reg)

        return scale * ortho_loss

    return ortho_reg 
Example #21
Source File: main_model_engine.py    From TripletLossFace with MIT License
def triplet_loss(self, y_real, output):
		output = tf.nn.l2_normalize(output, 1, 1e-10)
		anchor, positive, negative = tf.unstack(tf.reshape(output, (-1, 3, self.n_features)), num=3, axis=1)

		positive_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
		negative_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

		loss_1 = tf.add(tf.subtract(positive_dist, negative_dist), 0.2)
		loss = tf.reduce_mean(tf.maximum(loss_1, 0.0), 0)

		return loss 
Example #22
Source File: inception_preprocessing.py    From lambda-deep-learning-demo with Apache License 2.0
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.

  If height and width are specified, the image is resized to that size using
  bilinear interpolation.

  If central_fraction is specified, the central fraction of the input image
  is cropped first.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it will be converted to tf.float32, assuming that the
      range is [0, MAX], where MAX is the largest positive representable number
      for the int(8/16/32) data type (see `tf.image.convert_image_dtype` for
      details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    if central_fraction:
      image = tf.image.central_crop(image, central_fraction=central_fraction)

    if height and width:
      # Resize the image to the specified height and width.
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width],
                                       align_corners=False)
      image = tf.squeeze(image, [0])
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
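The final subtract/multiply pair rescales [0, 1] pixels to [-1, 1], the input range Inception-style networks expect. A hypothetical call (TF 1.x assumed):

image = tf.random.uniform([300, 400, 3])          # float32 in [0, 1)
processed = preprocess_for_eval(image, 224, 224)  # 3-D tensor with values in [-1, 1]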
Example #23
Source File: measure_modules.py    From ludwig with Apache License 2.0
def squared_error(targets, predictions, output_feature_name):
    # error = tf.compat.v1.get_variable('error_{}'.format(output_feature_name), initializer=tf.subtract(targets, predictions))
    error = tf.subtract(targets, predictions)
    return tf.pow(error, 2, name='squared_error_{}'.format(output_feature_name)) 
Example #24
Source File: measure_modules.py    From ludwig with Apache License 2.0
def error(targets, predictions, output_feature_name):
    # return tf.compat.v1.get_variable('error_{}'.format(output_feature_name), initializer=tf.subtract(targets, predictions))
    return tf.subtract(targets, predictions,
                       name='error_{}'.format(output_feature_name)) 
Example #25
Source File: utils.py    From zhusuan with MIT License
def __sub__(self, other):
        return tf.subtract(self, other) 
Example #26
Source File: utils.py    From zhusuan with MIT License
def __rsub__(self, other):
        return tf.subtract(other, self) 
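Together, these two magic methods let a wrapped tensor take part in ordinary subtraction expressions: t - x dispatches to __sub__, while x - t (e.g. 5.0 - t) dispatches to __rsub__, which swaps the operand order before calling tf.subtract.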
Example #27
Source File: model.py    From minimal-entropy-correlation-alignment with MIT License
def coral_loss(self, h_src, h_trg, gamma=1e-3):
    # Regularized covariances (D-CORAL is not actually regularized).
    # First: subtract the mean from the data matrix.
    batch_size = tf.to_float(tf.shape(h_src)[0])
    h_src = h_src - tf.reduce_mean(h_src, axis=0)
    h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
    cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    # Mean squared entry-wise difference (D-CORAL has an extra factor of 1/4);
    # the reduce_mean accounts for the 1/d^2 factor.
    return tf.reduce_mean(tf.square(tf.subtract(cov_source, cov_target)))
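A hypothetical call on randomly generated feature batches (TF 1.x assumed; `model` is an invented instance exposing this method):

h_src = tf.random.normal([64, 128])
h_trg = tf.random.normal([64, 128])
loss = model.coral_loss(h_src, h_trg)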
Example #28
Source File: network_base.py    From tf-pose with Apache License 2.0
def normalize_nasnet(self, input, name):
        input = tf.divide(input, 255.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        input = tf.multiply(input, 2.0, name=name + '_multiply')
        return input 
Example #29
Source File: measure_modules.py    From ludwig with Apache License 2.0
def r2(targets, predictions, output_feature_name):
    y_bar = tf.reduce_mean(targets)
    tot_ss = tf.reduce_sum(tf.pow(targets - y_bar, 2))
    res_ss = tf.reduce_sum(tf.pow(targets - predictions, 2))
    r2 = tf.subtract(1., res_ss / tot_ss,
                     name='r2_{}'.format(output_feature_name))
    return r2 
Example #30
Source File: sarcos.py    From aboleth with Apache License 2.0
def r2_metric(labels, predictions):
    SST, update_op1 = tf.metrics.mean_squared_error(
        labels, tf.reduce_mean(labels, axis=0))
    SSE, update_op2 = tf.metrics.mean_squared_error(labels, predictions)
    return tf.subtract(1.0, tf.div(SSE, SST)), tf.group(update_op1, update_op2)
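A hypothetical evaluation (TF 1.x assumed; tf.metrics requires initializing local variables and running the update op before reading the metric, and the label/prediction values here are invented):

labels = tf.constant([[1.0], [2.0], [3.0]])
predictions = tf.constant([[1.1], [1.9], [3.2]])
r2, update_op = r2_metric(labels, predictions)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(r2))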