Python tensorflow.matrix_determinant() Examples

The following are 12 code examples of tensorflow.matrix_determinant(), collected from open-source projects; follow the link above each example to see it in its original context. tf.matrix_determinant computes the determinant of one or more square matrices; it is the TensorFlow 1.x name for the op that TensorFlow 2 exposes as tf.linalg.det.
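A minimal usage sketch first (TensorFlow 1.x session style; the shapes and values are only illustrative): the op accepts a single square matrix or any batch of them, taking the determinant over the innermost two dimensions.

import numpy as np
import tensorflow as tf

a = tf.constant([[1., 2.],
                 [3., 4.]])                                       # shape (2, 2)
batch = tf.constant(np.random.rand(5, 3, 3).astype(np.float32))  # shape (5, 3, 3)

det_a = tf.matrix_determinant(a)          # scalar: 1*4 - 2*3 = -2
det_batch = tf.matrix_determinant(batch)  # shape (5,), one determinant per matrix

with tf.Session() as sess:
    print(sess.run(det_a))       # -2.0
    print(sess.run(det_batch))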
Example #1
Source File: gram.py    From subjective-functions with MIT License
def internal_novelty_loss(grams, mul=1.0):
    gram = keras.layers.Concatenate(axis=0)(grams)
    # gram will be something like (5, 64, 64)
    flat = keras.layers.Flatten()(gram)
    flat = PrintLayerShape("flat shape")(flat)

    # ~ (5, 4096)
    covar = Lambda(lambda x: K.dot(x,K.transpose(x)),
            output_shape = lambda input_shape: [input_shape[0], input_shape[0]])(flat)
    covar = PrintLayer("covar")(covar)

    # ~ (5, 5)
    # det = Lambda(lambda x: -tf.matrix_determinant(x),
    #         output_shape = lambda input_shape: [1])(covar)
    # det = Lambda(lambda x: -2*tf.reduce_sum(tf.log(tf.matrix_diag_part(tf.cholesky(x)))),
    #         output_shape = lambda input_shape: [1])(covar)
    
    def eye_diff(x):
        shape = K.shape(x)
        return x - mul * tf.eye(shape[0], shape[1])

    det = Lambda(lambda x: K.sum(K.square(eye_diff(x))),
            output_shape = lambda input_shape: [1])(covar)
    det = PrintLayer("det")(det)
    return det 
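The commented-out Cholesky variant relies on the identity that, for a symmetric positive-definite matrix, the determinant is the squared product of the Cholesky diagonal, so log det(A) = 2 * sum(log(diag(L))). Note that reading that diagonal needs tf.matrix_diag_part; tf.diag, which the original comment used, builds a matrix from a vector instead. A minimal standalone sketch of the identity (the helper name is ours, not the project's):

import tensorflow as tf

def log_det_psd(a):
    # For symmetric positive-definite a, det(a) = prod(diag(L))**2 with
    # L = cholesky(a); summing logs avoids overflowing the raw determinant.
    chol = tf.cholesky(a)
    return 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(chol)))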
Example #2
Source File: dice_tensorflow1.py    From DiCE with MIT License
def dpp_style(self, submethod):
        """Computes the DPP of a matrix."""
        det_entries = []
        if submethod == "inverse_dist":
            for i in range(self.total_CFs):
                for j in range(self.total_CFs):
                    det_temp_entry = tf.divide(1.0, tf.add(
                        1.0, self.compute_dist(self.cfs_frozen[i], self.cfs_frozen[j])))
                    if i == j:
                        det_temp_entry = tf.add(det_temp_entry, 0.0001)
                    det_entries.append(det_temp_entry)

        elif submethod == "exponential_dist":
            for i in range(self.total_CFs):
                for j in range(self.total_CFs):
                    det_temp_entry = tf.divide(1.0, tf.exp(
                        self.compute_dist(self.cfs_frozen[i], self.cfs_frozen[j])))
                    det_entries.append(det_temp_entry)

        det_entries = tf.reshape(det_entries, [self.total_CFs, self.total_CFs])
        diversity_loss = tf.matrix_determinant(det_entries)
        return diversity_loss 
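For reference, the same "inverse_dist" kernel can be built without the Python double loop. This is a hedged sketch assuming a precomputed [n, n] pairwise-distance tensor dist (a hypothetical input, not part of DiCE's API); the diagonal jitter plays the same role as the 0.0001 above, keeping the kernel well conditioned so the determinant does not collapse to zero.

import tensorflow as tf

def dpp_diversity(dist, jitter=1e-4):
    # Inverse-distance similarity: entries in (0, 1], with 1.0 on the diagonal.
    kernel = 1.0 / (1.0 + dist)
    # Small diagonal jitter for numerical stability, as in the loop above.
    kernel += jitter * tf.eye(tf.shape(dist)[0])
    # The determinant of the kernel is the diversity score: larger = more diverse.
    return tf.matrix_determinant(kernel)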
Example #3
Source File: ops.py    From tfdeploy with MIT License
def test_MatrixDeterminant(self):
        t = tf.matrix_determinant(self.random(2, 3, 4, 3, 3))
        self.check(t) 
Example #4
Source File: models.py    From anica with MIT License
def get_log_det_jacobian(self, o):
        J = self.compute_jacobian(o)
        def step(J_i):
            return tf.log(tf.abs(tf.matrix_determinant(J_i)))
        return tf.map_fn(step, J) 
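Two notes on this pattern: tf.matrix_determinant is itself batched over leading dimensions, so the map_fn is not strictly necessary, and where available, tf.linalg.slogdet computes log|det| directly in a numerically safer way. A hedged sketch of both alternatives, assuming J has shape [batch, d, d]:

import tensorflow as tf

def batch_log_det_jacobian(J):
    # Equivalent to the map_fn version above: the determinant op already
    # vectorizes over the leading batch dimension, returning shape [batch].
    return tf.log(tf.abs(tf.matrix_determinant(J)))

def batch_log_det_jacobian_stable(J):
    # slogdet avoids overflow/underflow in the determinant itself.
    sign, log_abs_det = tf.linalg.slogdet(J)
    return log_abs_det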
Example #5
Source File: linalg_grad_test.py    From deep_image_model with Apache License 2.0
def testBatchGradientUnknownSize(self):
    with self.test_session():
      batch_size = tf.constant(3)
      matrix_size = tf.constant(4)
      batch_identity = tf.tile(
          tf.expand_dims(
              tf.diag(tf.ones([matrix_size])), 0), [batch_size, 1, 1])
      determinants = tf.matrix_determinant(batch_identity)
      reduced = tf.reduce_sum(determinants)
      sum_grad = tf.gradients(reduced, batch_identity)[0]
      self.assertAllClose(batch_identity.eval(), sum_grad.eval()) 
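The test leans on the closed-form gradient of the determinant, d det(A)/dA = det(A) * inverse(A)^T, which is the identity matrix when A = I. A quick sketch checking that identity against autodiff on a concrete matrix:

import tensorflow as tf

A = tf.constant([[2., 1.],
                 [0., 3.]])
det = tf.matrix_determinant(A)
grad = tf.gradients(det, A)[0]
expected = det * tf.matrix_inverse(A, adjoint=True)  # det(A) * A^{-T}

with tf.Session() as sess:
    print(sess.run(grad))      # [[ 3.  0.] [-1.  2.]]
    print(sess.run(expected))  # same values as the autodiff gradient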
Example #6
Source File: determinant_op_test.py    From deep_image_model with Apache License 2.0
def _compareDeterminant(self, matrix_x):
    with self.test_session():
      self._compareDeterminantBase(matrix_x, tf.matrix_determinant(matrix_x)) 
Example #7
Source File: determinant_op_test.py    From deep_image_model with Apache License 2.0
def testNonSquareMatrix(self):
    # Taking the determinant of a non-square matrix should raise an error.
    with self.assertRaises(ValueError):
      tf.matrix_determinant(
          np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32)) 
Example #8
Source File: determinant_op_test.py    From deep_image_model with Apache License 2.0
def testWrongDimensions(self):
    # The input to the determinant must have rank at least 2; a plain vector is rejected.
    tensor1 = tf.constant([1., 2.])
    with self.assertRaises(ValueError):
      tf.matrix_determinant(tensor1) 
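Both shape tests above exercise the op's contract: the input must have rank at least 2, and the innermost two dimensions must be equal. With static shapes, the error surfaces at graph-construction time:

import tensorflow as tf

ok = tf.matrix_determinant(tf.zeros([7, 4, 4]))  # fine: a batch of 4x4 matrices
# tf.matrix_determinant(tf.zeros([2, 3]))   # ValueError: matrix is not square
# tf.matrix_determinant(tf.zeros([5]))      # ValueError: rank must be >= 2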
Example #9
Source File: operator_test_util.py    From deep_image_model with Apache License 2.0
def testDeterminants(self):
    with self.test_session():
      for batch_shape in [(), (2, 3,)]:
        for k in [1, 4]:
          operator, mat = self._build_operator_and_mat(batch_shape, k)
          expected_det = tf.matrix_determinant(mat).eval()

          self._compare_results(expected_det, operator.det())
          self._compare_results(np.log(expected_det), operator.log_det()) 
Example #10
Source File: gram.py    From subjective-functions with MIT License
def novelty_loss(grams, mul=1.0):
    dets = []
    for gram in grams:
        # gram will be something like (5, 64, 64)
        flat = keras.layers.Flatten()(gram)

        # ~ (5, 4096)
        covar = Lambda(lambda x: K.dot(x,K.transpose(x)),
                output_shape = lambda input_shape: [input_shape[0], input_shape[0]])(flat)
        covar = PrintLayer("covar")(covar)

        # ~ (5, 5)
        # det = Lambda(lambda x: -tf.matrix_determinant(x),
        #         output_shape = lambda input_shape: [1])(covar)
        # det = Lambda(lambda x: -2*tf.reduce_sum(tf.log(tf.matrix_diag_part(tf.cholesky(x)))),
        #         output_shape = lambda input_shape: [1])(covar)
        
        def eye_diff(x):
            shape = K.shape(x)
            return x - mul * tf.eye(shape[0], shape[1])

        det = Lambda(lambda x: K.sum(K.square(eye_diff(x))),
                output_shape = lambda input_shape: [1])(covar)
        det = PrintLayer("det")(det)
        dets.append(det)

    if len(dets) > 1:
        return keras.layers.add(dets)
    else:
        return dets[0] 
Example #11
Source File: dagmm.py    From AnomalyDetectionTransformations with MIT License
def _calc_component_density(z, phi, mu, sigma):
        sig_inv = tf.matrix_inverse(sigma)
        sig_sqrt_det = K.sqrt(tf.matrix_determinant(2 * np.pi * sigma) + K.epsilon())
        density = phi * (K.exp(-0.5 * K.sum(K.dot(z - mu, sig_inv) * (z - mu),
                                            axis=-1,
                                            keepdims=True)) / sig_sqrt_det) + K.epsilon()

        return density 
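The normalizer works because scaling a d x d matrix scales its determinant by the d-th power of the factor: det(2*pi*sigma) = (2*pi)**d * det(sigma), which is exactly the term under the square root of the multivariate Gaussian density. A two-line numpy check of that identity:

import numpy as np

S = np.diag([1., 2., 3.])  # any 3 x 3 positive-definite matrix works
c = 2 * np.pi
assert np.isclose(np.linalg.det(c * S), c**3 * np.linalg.det(S))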
Example #12
Source File: main.py    From g-tensorflow-models with Apache License 2.0
def estimate_rotation(xyz0, xyz1, pconf, noise):
  """Estimates the rotation between two sets of keypoints.

  The rotation is estimated by first subtracting mean from each set of keypoints
  and computing SVD of the covariance matrix.

  Args:
    xyz0: [batch, num_kp, 3] The first set of keypoints.
    xyz1: [batch, num_kp, 3] The second set of keypoints.
    pconf: [batch, num_kp] The weights used to compute the rotation estimate.
    noise: A number indicating the noise added to the keypoints.

  Returns:
    [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
  """

  xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
  xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

  pconf2 = tf.expand_dims(pconf, 2)
  cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
  cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)

  x = xyz0 - cen0
  y = xyz1 - cen1

  cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
  _, u, v = tf.svd(cov, full_matrices=True)

  d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
  ud = tf.concat(
      [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
      axis=2)
  return tf.matmul(ud, v, transpose_b=True) 
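The determinant here implements the standard Kabsch / orthogonal-Procrustes sign fix: det(V U^T) is +1 when the SVD solution is a rotation and -1 when it is a reflection, and scaling the last column by that sign forces a proper rotation. A minimal single-matrix numpy sketch of the same correction (the function name is ours):

import numpy as np

def kabsch_rotation(cov):
    # cov: the 3 x 3 weighted covariance between two centered point sets.
    u, _, vt = np.linalg.svd(cov)
    v = vt.T
    d = np.sign(np.linalg.det(v @ u.T))  # -1 means the SVD gave a reflection
    v[:, -1] *= d                        # flip the last singular direction
    return v @ u.T                       # a proper rotation: determinant +1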