Python tensorflow.divide() Examples

The following are 30 code examples of tensorflow.divide(). The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
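As a quick orientation before the project examples: tf.divide performs element-wise, Python-3-style true division and broadcasts like other binary ops. A minimal self-contained sketch, written against the TensorFlow 1.x API that the examples below use:

import tensorflow as tf

a = tf.constant([10.0, 20.0, 30.0])
b = tf.constant([4.0, 5.0, 2.0])

quotient = tf.divide(a, b)   # element-wise true division
scaled = tf.divide(a, 10.0)  # the scalar denominator broadcasts

with tf.Session() as sess:
    print(sess.run(quotient))  # [ 2.5  4.  15. ]
    print(sess.run(scaled))    # [1. 2. 3.]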
Example #1
Source File: predict_spatial.py    From sign-language-gesture-recognition with MIT License
def read_tensor_from_image_file(frames, input_height=299, input_width=299, input_mean=0, input_std=255):
    input_name = "file_reader"
    frames = [(tf.read_file(frame, input_name), frame) for frame in frames]
    decoded_frames = []
    for frame in frames:
        file_name = frame[1]
        file_reader = frame[0]
        if file_name.endswith(".png"):
            image_reader = tf.image.decode_png(file_reader, channels=3, name="png_reader")
        elif file_name.endswith(".gif"):
            image_reader = tf.squeeze(tf.image.decode_gif(file_reader, name="gif_reader"))
        elif file_name.endswith(".bmp"):
            image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
        else:
            image_reader = tf.image.decode_jpeg(file_reader, channels=3, name="jpeg_reader")
        decoded_frames.append(image_reader)
    float_caster = [tf.cast(image_reader, tf.float32) for image_reader in decoded_frames]
    float_caster = tf.stack(float_caster)
    resized = tf.image.resize_bilinear(float_caster, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    sess = tf.Session()
    result = sess.run(normalized)
    return result 
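A note on the defaults: with input_mean=0 and input_std=255, the final subtract-then-divide simply rescales pixel values from [0, 255] into [0, 1], i.e. normalized = (resized - 0) / 255.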
Example #2
Source File: losses.py    From cartoonify with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a (scalar) tensor representing the value of the loss function,
        or a float tensor of shape [batch_size, num_anchors] if anchorwise
        output is enabled
    """
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    prediction_tensor = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    if self._anchorwise_output:
      return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights
    return tf.reduce_sum(per_row_cross_ent * tf.reshape(weights, [-1])) 
Example #3
Source File: losses.py    From Person-Detection-and-Tracking with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing logit classification targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    target_tensor = self._scale_and_softmax_logits(target_tensor)
    prediction_tensor = tf.divide(prediction_tensor, self._logit_scale,
                                  name='scale_logits')

    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights 
Example #4
Source File: losses.py    From vehicle_counting_tensorflow with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    weights = tf.reduce_mean(weights, axis=2)
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    prediction_tensor = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights 
Example #5
Source File: preprocessor.py    From vehicle_counting_tensorflow with MIT License
def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0):
  """Converts multiclass logits to softmax scores after applying temperature.

  Args:
    multiclass_scores: float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for each
      class.
    temperature: Scale factor to use prior to applying softmax. Larger
      temperatures give more uniform distributions after softmax.

  Returns:
    multiclass_scores: float32 tensor of shape
      [num_instances, num_classes] with scaling and softmax applied.
  """

  # Multiclass scores must be stored as logits. Apply temp and softmax.
  multiclass_scores_scaled = tf.divide(
      multiclass_scores, temperature, name='scale_logits')
  multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax')

  return multiclass_scores 
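To make the temperature divide concrete, here is a small hand-checkable sketch (values chosen for illustration): dividing the logits by a larger temperature flattens the resulting softmax.

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.0]])

with tf.Session() as sess:
    sharp = sess.run(tf.nn.softmax(tf.divide(logits, 1.0)))
    flat = sess.run(tf.nn.softmax(tf.divide(logits, 10.0)))
    print(sharp)  # approx [[0.665 0.245 0.090]], peaked
    print(flat)   # approx [[0.367 0.332 0.301]], close to uniform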
Example #6
Source File: ipcr_model.py    From pointnet-registration-framework with MIT License
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
	with tf.variable_scope('loss') as LossEvaluation:
		predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
		predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

		# with tf.variable_scope('quat_normalization') as norm:
		norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
		norm_predicted_quat = tf.sqrt(norm_predicted_quat)
		norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
		const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
		norm_predicted_quat = tf.add(norm_predicted_quat,const)
		predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)

		transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat,predicted_position)

		#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
		loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
	return loss 
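The small constant added to the norm guards the divide: the quaternion is normalized as q / (||q|| + 1e-7), so even a zero-norm prediction cannot produce a NaN. Example #8 below applies the same pattern.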
Example #7
Source File: losses.py    From Person-Detection-and-Tracking with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    prediction_tensor = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights 
Example #8
Source File: pcr_model.py    From pointnet-registration-framework with MIT License
def get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):	
		with tf.variable_scope('loss') as LossEvaluation:
			predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
			predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

			# with tf.variable_scope('quat_normalization') as norm:
			norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
			norm_predicted_quat = tf.sqrt(norm_predicted_quat)
			norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
			const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
			norm_predicted_quat = tf.add(norm_predicted_quat,const)
			predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)
	
			transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat, predicted_position)

			# Use 1024 Points to find loss.
			#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
			loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
			# loss = 0
		return loss 
Example #9
Source File: evaluate_model_utils.py    From lanenet-lane-detection with Apache License 2.0
def calculate_model_precision(input_tensor, label_tensor):
    """
    calculate accuracy acc = correct_nums / ground_truth_nums
    :param input_tensor: binary segmentation logits
    :param label_tensor: binary segmentation label
    :return:
    """

    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(label_tensor, idx)
    accuracy = tf.count_nonzero(pix_cls_ret)
    accuracy = tf.divide(
        accuracy,
        tf.cast(tf.shape(tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1))))[0], tf.int64))

    return accuracy 
Example #10
Source File: evaluate_model_utils.py    From lanenet-lane-detection with Apache License 2.0
def calculate_model_fp(input_tensor, label_tensor):
    """
    calculate fp figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(final_output, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    false_pred = tf.cast(tf.shape(pix_cls_ret)[0], tf.int64) - tf.count_nonzero(
        tf.gather_nd(label_tensor, idx)
    )

    return tf.divide(false_pred, tf.cast(tf.shape(pix_cls_ret)[0], tf.int64)) 
Example #11
Source File: evaluate_model_utils.py    From lanenet-lane-detection with Apache License 2.0
def calculate_model_fn(input_tensor, label_tensor):
    """
    calculate fn figure
    :param input_tensor:
    :param label_tensor:
    :return:
    """
    logits = tf.nn.softmax(logits=input_tensor)
    final_output = tf.expand_dims(tf.argmax(logits, axis=-1), axis=-1)

    idx = tf.where(tf.equal(label_tensor, 1))
    pix_cls_ret = tf.gather_nd(final_output, idx)
    label_cls_ret = tf.gather_nd(label_tensor, tf.where(tf.equal(label_tensor, 1)))
    mis_pred = tf.cast(tf.shape(label_cls_ret)[0], tf.int64) - tf.count_nonzero(pix_cls_ret)

    return tf.divide(mis_pred, tf.cast(tf.shape(label_cls_ret)[0], tf.int64)) 
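Taken together, Examples #9 through #11 follow one pattern: count the relevant pixels with tf.count_nonzero, then tf.divide by the size of the reference set: true positives over ground-truth positives in #9, false positives over predicted positives in #10, and false negatives over ground-truth positives in #11.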
Example #12
Source File: quantize_linear.py    From onnx-tensorflow with Apache License 2.0
def version_10(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    y_scale = tensor_dict[node.inputs[1]]

    x = tf.cast(x, tf.float32)
    y = tf.divide(x, y_scale)
    y = tf.round(y)
    if len(node.inputs) == 3:
      y_zero_point = tensor_dict[node.inputs[2]]
      y_dtype = y_zero_point.dtype
      y_zero_point = tf.cast(y_zero_point, tf.float32)
      y = tf.add(y, y_zero_point)
    else:  # y_zero_point default dtype = uint8
      y_dtype = tf.uint8

    y = tf.saturate_cast(y, y_dtype)

    return [y] 
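The handler above implements y = saturate_cast(round(x / y_scale) + y_zero_point). A hand-worked check of the saturation behavior, using illustrative values (a scale of 0.5 and a uint8 zero point of 128, not defaults from the ONNX spec):

import tensorflow as tf

x = tf.constant([1.7, 100.0])
y = tf.saturate_cast(tf.round(tf.divide(x, 0.5)) + 128.0, tf.uint8)

with tf.Session() as sess:
    # round(1.7 / 0.5) + 128 = 3 + 128 = 131
    # round(100.0 / 0.5) + 128 = 328, saturated to the uint8 maximum 255
    print(sess.run(y))  # [131 255]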
Example #13
Source File: math_ops.py    From CapsLayer with Apache License 2.0
def divide(x, y, safe_mode=True, epsilon=None, name=None):
    """ A wrapper of `tf.divide`, computes Python style division of x by y but extends safe divide support.
        If safe_mode is `True` or epsilon is given(a small float number), the absolute value of denominator
        in the division will be clip to make sure it's bigger than epsilon(default is 1e-13).

    Args:
        safe_mode: Use safe divide mode.
        epsilon: Float number. Default is `1e-13`.
    """
    if not safe_mode and epsilon is None:
        return tf.divide(x, y, name=name)
    else:
        epsilon = 1e-20 if epsilon is None else epsilon
        name = "safe_divide" if name is None else name
        with tf.name_scope(name):
            y = tf.where(tf.greater(tf.abs(y), epsilon), y, y + tf.sign(y) * epsilon)
            return tf.divide(x, y) 
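A short usage sketch of the wrapper above (illustrative values; assumes tf is imported as in the surrounding module). One caveat: because tf.sign(0.0) is 0, a denominator that is exactly zero is not nudged by the epsilon term, so the clipping only protects against small nonzero denominators.

x = tf.constant([1.0, 1.0])
y = tf.constant([1e-30, 2.0])  # the first denominator is below the default epsilon

with tf.Session() as sess:
    print(sess.run(divide(x, y)))     # ~[1e+20 0.5], denominator clipped to ~1e-20
    print(sess.run(tf.divide(x, y)))  # [1e+30 0.5], raw division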
Example #14
Source File: losses.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    prediction_tensor = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights 
Example #15
Source File: categorical_cross_entropy_loss.py    From rlgraph with Apache License 2.0
def __init__(self, sparse=True, with_kl_regularizer=True, average_time_steps=False, scope="cross-entropy-loss",
                 **kwargs):
        """
        Args:
            sparse (bool): Whether we have sparse labels. Sparse labels can only assign one category to each
                sample, so labels are ints. If False, labels are already softmaxed categorical distribution probs
                OR simple logits.

            average_time_steps (bool): Whether, if a time rank is given, to divide by the sequence lengths to get
                the mean or not (leave as sum).
        """
        super(CategoricalCrossEntropyLoss, self).__init__(scope=scope, **kwargs)

        self.sparse = sparse
        self.with_kl_regularizer = with_kl_regularizer
        self.average_time_steps = average_time_steps
        #self.reduce_ranks = None

        #self.time_rank = None
        #self.time_major = None

        #self.is_bool = None 
Example #16
Source File: metrics.py    From basenji with Apache License 2.0
def result(self):
    true_mean = tf.divide(self._true_sum, self._count)
    true_mean2 = tf.math.square(true_mean)
    pred_mean = tf.divide(self._pred_sum, self._count)
    pred_mean2 = tf.math.square(pred_mean)

    term1 = self._product
    term2 = -tf.multiply(true_mean, self._pred_sum)
    term3 = -tf.multiply(pred_mean, self._true_sum)
    term4 = tf.multiply(self._count, tf.multiply(true_mean, pred_mean))
    covariance = term1 + term2 + term3 + term4

    true_var = self._true_sumsq - tf.multiply(self._count, true_mean2)
    pred_var = self._pred_sumsq - tf.multiply(self._count, pred_mean2)
    tp_var = tf.multiply(tf.math.sqrt(true_var), tf.math.sqrt(pred_var))
    correlation = tf.divide(covariance, tp_var)

    if self._summarize:
        return tf.reduce_mean(correlation)
    else:
        return correlation 
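The four term values above are the expansion of the centered cross-product, which is what lets the covariance be assembled from running sums alone:

$$\sum_i (t_i - \bar{t})(p_i - \bar{p}) = \sum_i t_i p_i - \bar{t}\sum_i p_i - \bar{p}\sum_i t_i + n\,\bar{t}\,\bar{p}$$

with term1 through term4 matching the four summands in order (self._product holds the running sum of products; self._count holds n).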
Example #17
Source File: metrics.py    From basenji with Apache License 2.0
def result(self):
    true_mean = tf.divide(self._true_sum, self._count)
    true_mean2 = tf.math.square(true_mean)

    total = self._true_sumsq - tf.multiply(self._count, true_mean2)

    resid1 = self._pred_sumsq
    resid2 = -2*self._product
    resid3 = self._true_sumsq
    resid = resid1 + resid2 + resid3

    r2 = tf.ones_like(self._shape, dtype=tf.float32) - tf.divide(resid, total)

    if self._summarize:
        return tf.reduce_mean(r2)
    else:
        return r2 
Example #18
Source File: neg_log_likelihood_loss.py    From rlgraph with Apache License 2.0
def __init__(self, distribution_spec, average_time_steps=False, scope="negative-log-likelihood-loss", **kwargs):
        """
        Args:
            average_time_steps (bool): Whether, if a time rank is given, to divide by the sequence lengths to get
                the mean or not (leave as sum).
        """
        super(NegativeLogLikelihoodLoss, self).__init__(scope=scope, **kwargs)

        self.distribution = Distribution.from_spec(distribution_spec)
        self.average_time_steps = average_time_steps

        self.add_components(self.distribution)

        #self.reduce_ranks = None

        self.time_rank = None
        self.time_major = None 
Example #19
Source File: losses.py    From Person-Detection-and-Tracking with MIT License
def _scale_and_softmax_logits(self, logits):
    """Scale logits then apply softmax."""
    scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits')
    return tf.nn.softmax(scaled_logits, name='convert_scores') 
Example #20
Source File: common_layers.py    From BERT with Apache License 2.0
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op 
Example #21
Source File: mesh_renderer.py    From tf_mesh_renderer with Apache License 2.0
def tone_mapper(image, gamma):
  """Applies gamma correction to the input image.

  Tone maps the input image batch in order to make scenes with a high dynamic
  range viewable. The gamma correction factor is computed separately per image,
  but is shared between all provided channels. The exact function computed is:

  image_out = A*image_in^gamma, where A is an image-wide constant computed so
  that the maximum image value is approximately 1. The correction is applied
  to all channels.

  Args:
    image: 4-D float32 tensor with shape [batch_size, image_height,
        image_width, channel_count]. The batch of images to tone map.
    gamma: 0-D float32 nonnegative tensor. Values of gamma below one compress
        relative contrast in the image, and values above one increase it. A
        value of 1 is equivalent to scaling the image to have a maximum value
        of 1.
  Returns:
    4-D float32 tensor with shape [batch_size, image_height, image_width,
    channel_count]. Contains the gamma-corrected images, clipped to the range
    [0, 1].
  """
  batch_size = image.shape[0].value
  corrected_image = tf.pow(image, gamma)
  image_max = tf.reduce_max(
      tf.reshape(corrected_image, [batch_size, -1]), axis=1)
  scaled_image = tf.divide(corrected_image,
                           tf.reshape(image_max, [batch_size, 1, 1, 1]))
  return tf.clip_by_value(scaled_image, 0.0, 1.0) 
Example #22
Source File: model.py    From PanopticSegmentation with MIT License
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.
    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale) 
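A hand-worked sketch using the function above (assumes tf is imported as in the surrounding module): for a 1024x1024 image, scale becomes (1023, 1023, 1023, 1023) and the shift moves (y2, x2) inward by one pixel, so the full-image box maps exactly to (0, 0, 1, 1).

boxes = tf.constant([[0.0, 0.0, 1024.0, 1024.0]])
shape = tf.constant([1024, 1024])

with tf.Session() as sess:
    print(sess.run(norm_boxes_graph(boxes, shape)))  # [[0. 0. 1. 1.]]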
Example #23
Source File: ops.py    From glas with Apache License 2.0
def _hellinger_normal(dist_a, dist_b, name=None):
    """ Compute the Hellinger distance between two normal distributions """
    with tf.name_scope(name, 'hellinger_normal', [dist_a.mu, dist_b.mu]):
        # Add an epsilon to avoid divide by zero
        denominator = tf.maximum(tf.square(dist_a.sigma) + tf.square(dist_b.sigma), 1e-8)

        mu_op = -tf.divide(tf.square(dist_a.mu - dist_b.mu), 4.0 * denominator)
        sigma_op = tf.maximum(tf.divide(2.0 * dist_a.sigma * dist_b.sigma, denominator), 0.0)

        return 1.0 - tf.sqrt(sigma_op) * tf.exp(mu_op) 
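For reference, this is the closed form of the squared Hellinger distance between two univariate normal distributions, which the op graph above mirrors term for term:

$$H^2(P, Q) = 1 - \sqrt{\frac{2\sigma_a\sigma_b}{\sigma_a^2 + \sigma_b^2}}\;\exp\left(-\frac{(\mu_a - \mu_b)^2}{4(\sigma_a^2 + \sigma_b^2)}\right)$$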
Example #24
Source File: siamese_net.py    From atec-nlp with MIT License
def forward(self):
        if self._interaction == 'concat':
            self.out = tf.concat([self.out1, self.out2], axis=1, name="out")
        elif self._interaction == 'multiply':
            self.out = tf.multiply(self.out1, self.out2, name="out")
        fc = tf.layers.dense(self.out, 128, name='fc1', activation=tf.nn.relu)
        # self.scores = tf.layers.dense(self.fc, 1, activation=tf.nn.sigmoid)
        self.logits = tf.layers.dense(fc, 2, name='fc2')
        # self.y_pred = tf.round(tf.nn.sigmoid(self.logits), name="predictions")  # pred class
        self.y_pred = tf.cast(tf.argmax(tf.nn.sigmoid(self.logits), 1, name="predictions"), tf.float32)

        with tf.name_scope("loss"):
            # [batch_size, num_classes]
            y = tf.one_hot(tf.cast(self.input_y, tf.int32), 2)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=y)
            self.loss = tf.reduce_mean(cross_entropy)
            # self.loss = tf.losses.sigmoid_cross_entropy(logits=self.logits, multi_class_labels=y)

            # y = self.input_y
            # y_ = self.scores
            # self.loss = -tf.reduce_mean(pos_weight * y * tf.log(tf.clip_by_value(y_, 1e-10, 1.0))
            #                             + (1-y) * tf.log(tf.clip_by_value(1-y_, 1e-10, 1.0)))
            # Add L2 regularization for all trainable variables except bias and BN variables.
            self.l2 = self._l2_reg_lambda * tf.reduce_sum(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables() if not ("noreg" in v.name or "bias" in v.name)])
            self.loss += self.l2

        # Accuracy computation is outside of this class.
        with tf.name_scope("metrics"):
            TP = tf.count_nonzero(self.input_y * self.y_pred, dtype=tf.float32)
            TN = tf.count_nonzero((self.input_y - 1) * (self.y_pred - 1), dtype=tf.float32)
            FP = tf.count_nonzero(self.y_pred * (self.input_y - 1), dtype=tf.float32)
            FN = tf.count_nonzero((self.y_pred - 1) * self.input_y, dtype=tf.float32)
            # tf.div follows Python 2 division semantics; tf.divide follows Python 3 true division.
            self.cm = tf.confusion_matrix(self.input_y, self.y_pred, name="confusion_matrix")
            self.acc = tf.divide(TP + TN, TP + TN + FP + FN, name="accuracy")
            self.precision = tf.divide(TP, TP + FP, name="precision")
            self.recall = tf.divide(TP, TP + FN, name="recall")
            self.f1 = tf.divide(2 * self.precision * self.recall, self.precision + self.recall, name="F1_score") 
Example #25
Source File: model.py    From rgn with MIT License
def _accumulate_loss(config, numerator, denominator, name_prefix=''):
    """ Constructs ops to accumulate and reduce loss and maintain a memory of lowest loss achieved """

    if config['num_evaluation_invocations'] == 1:
        # return simple loss
        accumulated_loss = tf.divide(numerator, denominator, name=name_prefix)
        update_op = reduce_op = tf.no_op()
    else:
        # create accumulator variables. note that tf.Variable uses name_scope (not variable_scope) for naming, which is what's desired in this instance
        numerator_accumulator   = tf.Variable(initial_value=0., trainable=False, name=name_prefix + '_numerator_accumulator')
        denominator_accumulator = tf.Variable(initial_value=0., trainable=False, name=name_prefix + '_denominator_accumulator')

        # accumulate
        with tf.control_dependencies([numerator, denominator, numerator_accumulator, denominator_accumulator]):
            accumulate_numerator   = tf.assign_add(numerator_accumulator, numerator)
            accumulate_denominator = tf.assign_add(denominator_accumulator, denominator)
            update_op = tf.group(accumulate_numerator, accumulate_denominator, name=name_prefix + '_accumulate_op')

        # divide to get final quotient
        with tf.control_dependencies([update_op]):
            accumulated_loss = tf.divide(numerator_accumulator, denominator_accumulator, name=name_prefix + '_accumulated')

        # zero accumulators
        with tf.control_dependencies([accumulated_loss]):
            zero_numerator   = tf.assign(numerator_accumulator,   0.)
            zero_denominator = tf.assign(denominator_accumulator, 0.)
            reduce_op = tf.group(zero_numerator, zero_denominator, name=name_prefix + '_reduce_op')

    min_loss_achieved = tf.Variable(initial_value=float('inf'), trainable=False, name='min_' + name_prefix + '_achieved')
    min_loss_op = tf.assign(min_loss_achieved, tf.reduce_min([min_loss_achieved, accumulated_loss]), name='min_' + name_prefix + '_achieved_op')
    with tf.control_dependencies([min_loss_op]):
        min_loss_achieved = tf.identity(min_loss_achieved)

    return accumulated_loss, min_loss_achieved, min_loss_op, update_op, reduce_op 
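A design note on the accumulation path above: when num_evaluation_invocations is greater than 1, tf.divide is applied only after both running sums have been updated, so the final quotient is the accumulated numerator over the accumulated denominator rather than an average of per-invocation quotients, which would weight every invocation equally regardless of its denominator.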
Example #26
Source File: tools.py    From Pixel2MeshPlusPlus with BSD 3-Clause "New" or "Revised" License
def normal(v):
    norm = tf.norm(v)
    # Note: a plain Python `norm == 0` check compares the Tensor object itself
    # to 0 and is always False at graph-construction time, so the zero-vector
    # guard must be expressed as a graph-level conditional.
    return tf.cond(tf.equal(norm, 0.0), lambda: v, lambda: tf.divide(v, norm))
Example #27
Source File: network_base.py    From tf-pose with Apache License 2.0
def normalize_nasnet(self, input, name):
        input = tf.divide(input, 255.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        input = tf.multiply(input, 2.0, name=name + '_multiply')
        return input 
Example #28
Source File: network_base.py    From tf-pose with Apache License 2.0
def normalize_mobilenet(self, input, name):
        input = tf.divide(input, 255.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        input = tf.multiply(input, 2.0, name=name + '_multiply')
        return input 
Example #29
Source File: network_base.py    From tf-pose with Apache License 2.0
def normalize_vgg(self, input, name):
        # normalize input -1.0 ~ 1.0
        input = tf.divide(input, 255.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        input = tf.multiply(input, 2.0, name=name + '_multiply')
        return input 
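Examples #27 through #29 repeat this normalization verbatim for NASNet, MobileNet, and VGG: divide by 255, shift by 0.5, scale by 2, mapping pixel values from [0, 255] to [-1, 1]. Algebraically the chain collapses to x / 127.5 - 1, so a hypothetical condensed equivalent (not part of the tf-pose repository) would be:

def normalize(self, input, name):
    # (x / 255 - 0.5) * 2  ==  x / 127.5 - 1
    return tf.subtract(tf.divide(input, 127.5), 1.0, name=name + '_normalize')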
Example #30
Source File: atari_model.py    From tf2rl with MIT License
def _compute_feature(self, states):
        features = tf.divide(tf.cast(states, tf.float32),
                             tf.constant(255.))
        features = self.conv1(features)
        features = self.conv2(features)
        features = self.conv3(features)
        features = self.flat(features)
        features = self.fc1(features)
        return features