Python tensorflow.self_adjoint_eig() Examples

The following are 24 code examples of tensorflow.self_adjoint_eig(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
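Before the project examples, here is a minimal usage sketch written for this page (it assumes TensorFlow 1.x graph mode; in TensorFlow 2.x the same operation is tf.linalg.eigh). tf.self_adjoint_eig() takes a self-adjoint (symmetric/Hermitian) matrix, or a batch of them, and returns the eigenvalues in ascending order together with the eigenvectors as the columns of the second output:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x graph mode

a = tf.constant([[2.0, 1.0],
                 [1.0, 2.0]])        # a symmetric 2x2 matrix
e, v = tf.self_adjoint_eig(a)        # eigenvalues (ascending), eigenvectors as columns

with tf.Session() as sess:
    eigenvalues, eigenvectors = sess.run([e, v])
    print(eigenvalues)               # approximately [1. 3.]
    # Reconstruct a from its eigendecomposition: V diag(e) V^T.
    print(np.allclose(eigenvectors @ np.diag(eigenvalues) @ eigenvectors.T,
                      [[2.0, 1.0], [1.0, 2.0]]))  # True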
Example #1
Source File: model.py    From minimal-entropy-correlation-alignment with MIT License
def log_coral_loss(self, h_src, h_trg, gamma=1e-3):
    # Regularizing the covariances (the commented-out gamma * I terms below) can result in inf or nan.
    # First: subtract the mean from each data matrix.
    batch_size = tf.to_float(tf.shape(h_src)[0])
    h_src = h_src - tf.reduce_mean(h_src, axis=0)
    h_trg = h_trg - tf.reduce_mean(h_trg, axis=0)
    cov_source = (1. / (batch_size - 1)) * tf.matmul(h_src, h_src, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    cov_target = (1. / (batch_size - 1)) * tf.matmul(h_trg, h_trg, transpose_a=True)  # + gamma * tf.eye(self.hidden_repr_size)
    # Eigendecomposition of each covariance matrix.
    eig_source = tf.self_adjoint_eig(cov_source)
    eig_target = tf.self_adjoint_eig(cov_target)
    # Matrix logarithm via the eigendecomposition: log(C) = V diag(log(e)) V^T.
    log_cov_source = tf.matmul(eig_source[1], tf.matmul(tf.diag(tf.log(eig_source[0])), eig_source[1], transpose_b=True))
    log_cov_target = tf.matmul(eig_target[1], tf.matmul(tf.diag(tf.log(eig_target[0])), eig_target[1], transpose_b=True))

    # Mean squared entrywise (Frobenius-style) distance between the log-covariances.
    return tf.reduce_mean(tf.square(tf.subtract(log_cov_source, log_cov_target)))
    #~ return tf.reduce_mean(tf.reduce_max(eig_target[0]))
    #~ return tf.to_float(tf.equal(tf.count_nonzero(h_src), tf.count_nonzero(h_src)))
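The identity Example #1 relies on is that for a symmetric positive-definite covariance C with eigendecomposition C = V diag(e) V^T, the matrix logarithm is log(C) = V diag(log(e)) V^T. A small NumPy sanity check of that identity, written for this page (it uses SciPy's logm purely for comparison and is not part of the project above):

import numpy as np
from scipy.linalg import logm

rng = np.random.default_rng(0)
h = rng.standard_normal((32, 5))
h = h - h.mean(axis=0)
c = h.T @ h / (len(h) - 1) + 1e-3 * np.eye(5)   # well-conditioned covariance matrix
e, v = np.linalg.eigh(c)
log_c = v @ np.diag(np.log(e)) @ v.T            # matrix log via the eigendecomposition
print(np.allclose(log_c, logm(c)))              # True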
Example #2
Source File: weight_blocks.py    From noisy-K-FAC with Apache License 2.0
def update(self, block):
        input_factor = block._input_factor
        output_factor = block._output_factor
        pi = _compute_pi_tracenorm(input_factor.get_cov(), output_factor.get_cov())

        coeff = self._coeff / block._renorm_coeff
        coeff = coeff ** 0.5
        damping = coeff / (self._eta ** 0.5)

        ue, uv = tf.self_adjoint_eig(
            input_factor.get_cov() / pi + damping * tf.eye(self._u_c.shape.as_list()[0]))
        ve, vv = tf.self_adjoint_eig(
            output_factor.get_cov() * pi + damping * tf.eye(self._v_c.shape.as_list()[0]))

        ue = coeff / tf.maximum(ue, damping)
        new_uc = uv * ue ** 0.5

        ve = coeff / tf.maximum(ve, damping)
        new_vc = vv * ve ** 0.5

        updates_op = [self._u_c.assign(new_uc), self._v_c.assign(new_vc)]
        return tf.group(*updates_op) 
Example #3
Source File: optimization.py    From cleverhans with MIT License
def tf_smooth_eig_vec(self):
    """Function that returns smoothed version of min eigen vector."""
    _, matrix_m = self.dual_object.get_full_psd_matrix()
    # Easier to think in terms of max so negating the matrix
    [eig_vals, eig_vectors] = tf.self_adjoint_eig(-matrix_m)
    exp_eig_vals = tf.exp(tf.divide(eig_vals, self.smooth_placeholder))
    scaling_factor = tf.reduce_sum(exp_eig_vals)
    # Multiplying each eig vector by exponential of corresponding eig value
    # Scaling factor normalizes the vector to be unit norm
    eig_vec_smooth = tf.divide(
        tf.matmul(eig_vectors, tf.diag(tf.sqrt(exp_eig_vals))),
        tf.sqrt(scaling_factor))
    return tf.reshape(
        tf.reduce_sum(eig_vec_smooth, axis=1),
        shape=[eig_vec_smooth.shape[0].value, 1]) 
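The smoothing above is a soft version of "take the eigenvector of the largest eigenvalue of -matrix_m": each eigenvector column is weighted by the square root of a softmax over the scaled eigenvalues, so as smooth_placeholder approaches zero the result concentrates (up to sign) on the eigenvector of matrix_m's smallest eigenvalue. A hypothetical NumPy sketch of the same weighting, written for this page with a max-shift added for numerical stability:

import numpy as np

def smooth_min_eig_vec(m, t):
    e, v = np.linalg.eigh(-m)              # eigendecompose the negated matrix
    w = np.exp((e - e.max()) / t)          # shifted for stability; the code above exponentiates directly
    w = w / w.sum()                        # softmax weights over the eigenvalues of -m
    return (v * np.sqrt(w)).sum(axis=1)    # weighted sum of eigenvector columns

m = np.array([[2.0, 0.0],
              [0.0, 0.5]])
print(smooth_min_eig_vec(m, t=1e-3))       # ~ [0. 1.], the eigenvector of m's smallest eigenvalue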
Example #4
Source File: ops.py    From 3DRegNet with MIT License
def tf_quaternion_from_matrix(M):

    import tensorflow as tf

    m00 = M[:, 0, 0][..., None]
    m01 = M[:, 0, 1][..., None]
    m02 = M[:, 0, 2][..., None]
    m10 = M[:, 1, 0][..., None]
    m11 = M[:, 1, 1][..., None]
    m12 = M[:, 1, 2][..., None]
    m20 = M[:, 2, 0][..., None]
    m21 = M[:, 2, 1][..., None]
    m22 = M[:, 2, 2][..., None]
    # symmetric matrix K
    zeros = tf.zeros_like(m00)
    K = tf.concat(
        [m00 - m11 - m22, zeros, zeros, zeros,
         m01 + m10, m11 - m00 - m22, zeros, zeros,
         m02 + m20, m12 + m21, m22 - m00 - m11, zeros,
         m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
        axis=1)
    K = tf.reshape(K, (-1, 4, 4))
    K /= 3.0
    # quaternion is eigenvector of K that corresponds to largest eigenvalue
    w, V = tf.self_adjoint_eig(K)

    q0 = V[:, 3, 3][..., None]
    q1 = V[:, 0, 3][..., None]
    q2 = V[:, 1, 3][..., None]
    q3 = V[:, 2, 3][..., None]
    q = tf.concat([q0, q1, q2, q3], axis=1)
    sel = tf.reshape(tf.to_float(q[:, 0] < 0.0), (-1, 1))
    q = (1.0 - sel) * q - sel * q

    return q 
Example #5
Source File: self_adjoint_eig_op_test.py    From deep_image_model with Apache License 2.0
def _GetSelfAdjointEigGradTest(dtype_, shape_):

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
    a += a.T
    a = np.tile(a, batch_shape + (1, 1))
    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(dtype_).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
    # tolerance obtained by looking at actual differences using
    # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
    if dtype_ == np.float32:
      tol = 1e-2
    else:
      tol = 1e-7
    with self.test_session():
      tf_a = tf.constant(a)
      tf_e, tf_v = tf.self_adjoint_eig(tf_a)
      for b in tf_e, tf_v:
        x_init = np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
        x_init += x_init.T
        x_init = np.tile(x_init, batch_shape + (1, 1))
        theoretical, numerical = tf.test.compute_gradient(
            tf_a,
            tf_a.get_shape().as_list(),
            b,
            b.get_shape().as_list(),
            x_init_value=x_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test 
Example #6
Source File: optimization.py    From cleverhans with MIT License
def tf_min_eig_vec(self):
    """Function for min eigen vector using tf's full eigen decomposition."""
    # Full eigen decomposition requires the explicit psd matrix M
    _, matrix_m = self.dual_object.get_full_psd_matrix()
    [eig_vals, eig_vectors] = tf.self_adjoint_eig(matrix_m)
    index = tf.argmin(eig_vals)
    return tf.reshape(
        eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1]) 
Example #7
Source File: self_adjoint_eig_op_test.py    From deep_image_model with Apache License 2.0
def testWrongDimensions(self):
    # The input to self_adjoint_eig should be a tensor of
    # at least rank 2.
    scalar = tf.constant(1.)
    with self.assertRaises(ValueError):
      tf.self_adjoint_eig(scalar)
    vector = tf.constant([1., 2.])
    with self.assertRaises(ValueError):
      tf.self_adjoint_eig(vector) 
Example #8
Source File: fisher_factors.py    From kfac with Apache License 2.0
def get_eigendecomp(self):
    """Creates or retrieves eigendecomposition of self._cov."""
    # Unlike get_matpower this doesn't retrieve a stored variable, but instead
    # always computes a fresh version from the current value of self.cov.
    if not self._eigendecomp:
      eigenvalues, eigenvectors = tf.self_adjoint_eig(self.cov)

      # The matrix self._cov is positive semidefinite by construction, but the
      # numerical eigenvalues could be negative due to numerical errors, so here
      # we clip them to be at least EIGENVALUE_CLIPPING_THRESHOLD
      clipped_eigenvalues = tf.maximum(eigenvalues,
                                       EIGENVALUE_CLIPPING_THRESHOLD)
      self._eigendecomp = (clipped_eigenvalues, eigenvectors)

    return self._eigendecomp 
Example #9
Source File: utils.py    From kfac with Apache License 2.0
def posdef_eig_self_adjoint(mat):
  """Computes eigendecomposition using self_adjoint_eig."""
  evals, evecs = tf.self_adjoint_eig(mat)
  evals = tf.abs(evals)  # Should be equivalent to svd approach.

  return evals, evecs 
Example #10
Source File: utils.py    From kfac with Apache License 2.0
def posdef_inv_eig(tensor, identity, damping):
  """Computes inverse(tensor + damping * identity) with eigendecomposition."""
  eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)
  return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True) 
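The one-liner above uses broadcasting: dividing the eigenvector matrix by the eigenvalue vector scales each column, so (V / e) V^T equals V diag(1/e) V^T, the inverse of the damped matrix. A hypothetical NumPy check of that identity, written for this page and not part of kfac:

import numpy as np

rng = np.random.default_rng(0)
b = rng.standard_normal((4, 4))
a = b @ b.T                                 # symmetric positive semi-definite matrix
damping = 0.1
e, v = np.linalg.eigh(a + damping * np.eye(4))
approx_inv = (v / e) @ v.T                  # divide each column of v by its eigenvalue
print(np.allclose(approx_inv, np.linalg.inv(a + damping * np.eye(4))))  # True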
Example #11
Source File: ops.py    From tfdeploy with MIT License
def test_SelfAdjointEigV2(self):
        t = tf.self_adjoint_eig(np.array(3 * [3, 2, 2, 1]).reshape(3, 2, 2).astype("float32"))
        # the order of eigen vectors and values may differ between tf and np, so only compare sum
        # and mean
        # also, different numerical algorithms are used, so account for difference in precision by
        # comparing numbers with 4 digits
        self.check(t, ndigits=4, stats=True, abs=True) 
Example #12
Source File: kfac.py    From stable-baselines with MIT License
def compute_stats_eigen(self):
        """
        compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue

        :return: ([TensorFlow Tensor]) update operations
        """
        # TODO: figure out why this op has delays (possibly moving eigenvectors around?)
        with tf.device('/cpu:0'):
            stats_eigen = self.stats_eigen
            computed_eigen = {}
            eigen_reverse_lookup = {}
            update_ops = []
            # sync copied stats
            with tf.control_dependencies([]):
                for stats_var in stats_eigen:
                    if stats_var not in computed_eigen:
                        eigen_decomposition = tf.self_adjoint_eig(stats_var)
                        eigen_values = eigen_decomposition[0]
                        eigen_vectors = eigen_decomposition[1]
                        if self._use_float64:
                            eigen_values = tf.cast(eigen_values, tf.float64)
                            eigen_vectors = tf.cast(eigen_vectors, tf.float64)
                        update_ops.append(eigen_values)
                        update_ops.append(eigen_vectors)
                        computed_eigen[stats_var] = {'e': eigen_values, 'Q': eigen_vectors}
                        eigen_reverse_lookup[eigen_values] = stats_eigen[stats_var]['e']
                        eigen_reverse_lookup[eigen_vectors] = stats_eigen[stats_var]['Q']

            self.eigen_reverse_lookup = eigen_reverse_lookup
            self.eigen_update_list = update_ops

            if KFAC_DEBUG:
                self.eigen_update_list = [item for item in update_ops]
                with tf.control_dependencies(update_ops):
                    update_ops.append(tf.Print(tf.constant(
                        0.), [tf.convert_to_tensor('computed factor eigen')]))

        return update_ops 
Example #13
Source File: dp_pca.py    From DOTA_models with Apache License 2.0
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
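The last line of Example #13 selects columns of eigvecs: transposing, gathering rows by the top-k indices, and transposing back extracts the eigenvectors of the projection_dims largest eigenvalues. A hypothetical NumPy rendering of that idiom, written for this page:

import numpy as np

eigvals = np.array([0.1, 2.0, 0.5, 1.5])
eigvecs = np.eye(4)                        # toy matrix whose columns are eigenvectors
projection_dims = 2
topk = np.argsort(eigvals)[::-1][:projection_dims]   # indices of the largest eigenvalues
projection = eigvecs.T[topk].T             # gathering rows of the transpose == gathering columns
print(projection.shape)                    # (4, 2): one column per retained eigenvector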
Example #14
Source File: dp_pca.py    From multilabel-image-classification-tensorflow with MIT License
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #15
Source File: dp_pca.py    From HumanRecognition with MIT License
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #16
Source File: dp_pca.py    From object_detection_with_tensorflow with MIT License
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #17
Source File: dp_pca.py    From object_detection_kitti with Apache License 2.0
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #18
Source File: dp_pca.py    From hands-detection with MIT License
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #19
Source File: dp_pca.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #20
Source File: dp_pca.py    From Action_Recognition_Zoo with MIT License
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #21
Source File: vgp.py    From VFF with Apache License 2.0
def build_KL(self):
        """
        The covariance of q(u) has a kronecker structure, so
        appropriate reductions apply for the trace and logdet terms.
        """
        # Mahalanobis term, m^T K^{-1} m
        Kuu = [make_Kuu(kern, a, b, self.ms) for kern, a, b, in zip(self.kerns, self.a, self.b)]
        Kim = kron_vec_apply(Kuu, self.q_mu, 'solve')
        KL = 0.5*tf.reduce_sum(self.q_mu * Kim)

        # Constant term
        KL += -0.5*tf.cast(tf.size(self.q_mu), float_type)

        # Log det term
        Ls = [tf.matrix_band_part(q_sqrt_d, -1, 0) for q_sqrt_d in self.q_sqrt_kron]
        N_others = [float(np.prod(self.Ms)) / M for M in self.Ms]
        Q_logdets = [tf.reduce_sum(tf.log(tf.square(tf.diag_part(L)))) for L in Ls]
        KL += -0.5 * reduce(tf.add, [N*logdet for N, logdet in zip(N_others, Q_logdets)])

        # trace term tr(K^{-1} Sigma_q)
        Ss = [tf.matmul(L, tf.transpose(L)) for L in Ls]
        traces = [K.trace_KiX(S) for K, S, in zip(Kuu, Ss)]
        KL += 0.5 * reduce(tf.multiply, traces)  # kron-trace is the product of traces

        # log det term Kuu
        Kuu_logdets = [K.logdet() for K in Kuu]
        KL += 0.5 * reduce(tf.add, [N*logdet for N, logdet in zip(N_others, Kuu_logdets)])

        if self.use_two_krons:
            # extra logdet terms:
            Ls_2 = [tf.matrix_band_part(q_sqrt_d, -1, 0) for q_sqrt_d in self.q_sqrt_kron_2]
            LiL = [tf.matrix_triangular_solve(L1, L2) for L1, L2 in zip(Ls, Ls_2)]
            eigvals = [tf.self_adjoint_eig(tf.matmul(tf.transpose(mat), mat))[0] for mat in LiL]  # discard eigenvectors
            eigvals_kronned = kron([tf.reshape(e, [1, -1]) for e in eigvals])
            KL += -0.5 * tf.reduce_sum(tf.log(1 + eigvals_kronned))

            # extra trace terms
            Ss = [tf.matmul(L, tf.transpose(L)) for L in Ls_2]
            traces = [K.trace_KiX(S) for K, S, in zip(Kuu, Ss)]
            KL += 0.5 * reduce(tf.multiply, traces)  # kron-trace is the product of traces

        elif self.use_extra_ranks:
            # extra logdet terms
            KiW = kron_mat_apply(Kuu, self.q_sqrt_W, 'solve', self.use_extra_ranks)
            WTKiW = tf.matmul(tf.transpose(self.q_sqrt_W), KiW)
            L_extra = tf.cholesky(np.eye(self.use_extra_ranks) + WTKiW)
            KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L_extra))))

            # extra trace terms
            KL += 0.5 * tf.reduce_sum(tf.diag_part(WTKiW))

        return KL 
Example #22
Source File: self_adjoint_eig_op_test.py    From deep_image_model with Apache License 2.0
def _GetSelfAdjointEigTest(dtype_, shape_):

  def CompareEigenVectors(self, x, y, tol):
    # Eigenvectors are only unique up to sign so we normalize the signs first.
    signs = np.sign(np.sum(np.divide(x, y), -2, keepdims=True))
    x *= signs
    self.assertAllClose(x, y, atol=tol, rtol=tol)

  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
    num_batches = int(np.prod(x_e.shape[:-1]))
    n = x_e.shape[-1]
    x_e = np.reshape(x_e, [num_batches] + [n])
    x_v = np.reshape(x_v, [num_batches] + [n, n])
    y_e = np.reshape(y_e, [num_batches] + [n])
    y_v = np.reshape(y_v, [num_batches] + [n, n])
    for i in range(num_batches):
      x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
      y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
      self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
      CompareEigenVectors(self, x_vi, y_vi, tol)

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
    a += a.T
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ == np.float32:
      atol = 1e-4
    else:
      atol = 1e-12
    for compute_v in False, True:
      np_e, np_v = np.linalg.eig(a)
      with self.test_session():
        if compute_v:
          tf_e, tf_v = tf.self_adjoint_eig(tf.constant(a))

          # Check that V*diag(E)*V^T is close to A.
          a_ev = tf.batch_matmul(
              tf.batch_matmul(tf_v, tf.matrix_diag(tf_e)), tf_v, adj_y=True)
          self.assertAllClose(a_ev.eval(), a, atol=atol)

          # Compare to numpy.linalg.eig.
          CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(), tf_v.eval(),
                                     atol)
        else:
          tf_e = tf.self_adjoint_eigvals(tf.constant(a))
          self.assertAllClose(
              np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)

  return Test 
Example #23
Source File: dp_pca.py    From Gun-Detector with Apache License 2.0
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices)) 
Example #24
Source File: dp_pca.py    From yolo_v2 with Apache License 2.0
def ComputeDPPrincipalProjection(data, projection_dims,
                                 sanitizer, eps_delta, sigma):
  """Compute differentially private projection.

  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.
  Returns:
    A projection matrix with projection_dims columns.
  """

  eps, delta = eps_delta
  # Normalize each row.
  normalized_data = tf.nn.l2_normalize(data, 1)
  covar = tf.matmul(tf.transpose(normalized_data), normalized_data)
  saved_shape = tf.shape(covar)
  num_examples = tf.slice(tf.shape(data), [0], [1])
  if eps > 0:
    # Since the data is already normalized, there is no need to clip
    # the covariance matrix.
    assert delta > 0
    saned_covar = sanitizer.sanitize(
        tf.reshape(covar, [1, -1]), eps_delta, sigma=sigma,
        option=san.ClipOption(1.0, False), num_examples=num_examples)
    saned_covar = tf.reshape(saned_covar, saved_shape)
    # Symmetrize saned_covar. This also reduces the noise variance.
    saned_covar = 0.5 * (saned_covar + tf.transpose(saned_covar))
  else:
    saned_covar = covar

  # Compute the eigen decomposition of the covariance matrix, and
  # return the top projection_dims eigen vectors, represented as columns of
  # the projection matrix.
  eigvals, eigvecs = tf.self_adjoint_eig(saned_covar)
  _, topk_indices = tf.nn.top_k(eigvals, projection_dims)
  topk_indices = tf.reshape(topk_indices, [projection_dims])
  # Gather and return the corresponding eigenvectors.
  return tf.transpose(tf.gather(tf.transpose(eigvecs), topk_indices))