Python tensorflow.trace() Examples

The following are 22 code examples of tensorflow.trace(), drawn from open-source projects. The project, source file, and license for each example are listed above its code. You may also want to check out the other available functions and classes of the tensorflow module.
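
tf.trace returns the sum of the diagonal elements of the innermost matrices of its input, so an input of shape (..., M, N) yields an output of shape (...). In TensorFlow 2.x the same op is exposed as tf.linalg.trace; tf.trace is the 1.x alias used throughout the examples below. A minimal sketch, assuming TensorFlow 1.x graph mode:

import tensorflow as tf

x = tf.constant([[[1., 2.], [3., 4.]],
                 [[5., 6.], [7., 8.]]])   # shape (2, 2, 2): a batch of two matrices

traces = tf.trace(x)                      # trace of each innermost matrix

with tf.Session() as sess:
    print(sess.run(traces))               # [ 5. 13.]
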
Example #1
Source File: mmd.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    m = tf.cast(K_XX.get_shape()[0], tf.float32)
    n = tf.cast(K_YY.get_shape()[0], tf.float32)

    if biased:
        mmd2 = (tf.reduce_sum(K_XX) / (m * m)
              + tf.reduce_sum(K_YY) / (n * n)
              - 2 * tf.reduce_sum(K_XY) / (m * n))
    else:
        if const_diagonal is not False:
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)

        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
              + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
              - 2 * tf.reduce_sum(K_XY) / (m * n))

    return mmd2 
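
For reference, the estimator implemented above (and repeated in Examples #3 and #4) is the squared maximum mean discrepancy computed from the kernel matrices K_XX, K_XY and K_YY. Subtracting the traces removes the diagonal self-similarity terms, which is what makes the default branch the standard unbiased estimator:

\widehat{\mathrm{MMD}}^2_u = \frac{1}{m(m-1)}\sum_{i \neq j} K_{XX}(i,j) + \frac{1}{n(n-1)}\sum_{i \neq j} K_{YY}(i,j) - \frac{2}{mn}\sum_{i,j} K_{XY}(i,j)

The biased branch keeps the diagonals and divides by m^2, n^2 and mn instead. const_diagonal lets the caller skip the two tf.trace calls when every diagonal entry is known to equal a constant (for example k(x, x) = 1 for an RBF kernel), in which case the trace is just m (or n) times that constant.
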
Example #2
Source File: sparse_covariance.py    From tf-example-models with Apache License 2.0
def get_value_updater(self, data, new_mean, gamma_weighted, gamma_sum):
        tf_new_differences = tf.subtract(data, tf.expand_dims(new_mean, 0))
        tf_sq_dist_matrix = tf.matmul(tf.expand_dims(tf_new_differences, 2), tf.expand_dims(tf_new_differences, 1))
        tf_new_covariance = tf.reduce_sum(tf_sq_dist_matrix * tf.expand_dims(tf.expand_dims(gamma_weighted, 1), 2), 0)

        if self.has_prior:
            tf_new_covariance = self.get_prior_adjustment(tf_new_covariance, gamma_sum)

        tf_s, tf_u, _ = tf.svd(tf_new_covariance)

        tf_required_eigvals = tf_s[:self.rank]
        tf_required_eigvecs = tf_u[:, :self.rank]

        tf_new_baseline = (tf.trace(tf_new_covariance) - tf.reduce_sum(tf_required_eigvals)) / self.tf_rest
        tf_new_eigvals = tf_required_eigvals - tf_new_baseline
        tf_new_eigvecs = tf.transpose(tf_required_eigvecs)

        return tf.group(
            self.tf_baseline.assign(tf_new_baseline),
            self.tf_eigvals.assign(tf_new_eigvals),
            self.tf_eigvecs.assign(tf_new_eigvecs)
        ) 
Example #3
Source File: my_layers.py    From DAS with Apache License 2.0
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    m = tf.cast(tf.shape(K_XX)[0], tf.float32)
    n = tf.cast(tf.shape(K_YY)[0], tf.float32)

    
    if biased:
        mmd2 = (tf.reduce_sum(K_XX, keep_dims=True) / (m * m)
              + tf.reduce_sum(K_YY, keep_dims=True) / (n * n)
              - 2 * tf.reduce_sum(K_XY, keep_dims=True) / (m * n))
    else:
        if const_diagonal is not False:
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)

        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
              + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
              - 2 * tf.reduce_sum(K_XY) / (m * n))

    return mmd2 
Example #4
Source File: mmd.py    From RGAN with MIT License
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    m = tf.cast(K_XX.get_shape()[0], tf.float32)
    n = tf.cast(K_YY.get_shape()[0], tf.float32)

    if biased:
        mmd2 = (tf.reduce_sum(K_XX) / (m * m)
              + tf.reduce_sum(K_YY) / (n * n)
              - 2 * tf.reduce_sum(K_XY) / (m * n))
    else:
        if const_diagonal is not False:
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)

        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
              + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
              - 2 * tf.reduce_sum(K_XY) / (m * n))

    return mmd2 
Example #5
Source File: symmetry_qs.py    From elbow with BSD 3-Clause "New" or "Revised" License
def general_orthog_correction(mean, std, k, scale_svs=None):    
    
    std = tf.clip_by_value(std, 1e-2, np.inf)
    
    if len(std.get_shape()) > 1:
        # largest singular value of the covariance matrix for each row
        iso_std = tf.expand_dims(tf.reduce_max(std, axis=1), axis=1)
    else:
        iso_std = std

    r = mean/iso_std        
    A = .5 * tf.matmul(tf.transpose(r), r)

    tr = tf.trace(A)
    svs = tf.sqrt(util.differentiable_sq_singular_vals(A))
    if scale_svs is not None:
        svs *= scale_svs
        tr *= scale_svs
        
    lb = lpbessel_svs(svs, k)
    
    return tr - lb 
Example #6
Source File: math_func.py    From MMD-GAN with Apache License 2.0
def trace_sqrt_product_tf(cov1, cov2):
    """ This function calculates trace(sqrt(cov1 * cov2))

    This code is inspired from:
    https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

    :param cov1:
    :param cov2:
    :return:
    """
    sqrt_cov1 = sqrt_sym_mat_tf(cov1)
    cov_121 = tf.matmul(tf.matmul(sqrt_cov1, cov2), sqrt_cov1)

    return tf.trace(sqrt_sym_mat_tf(cov_121)) 
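
A note on why the product is symmetrized (this applies equally to the NumPy version in Example #7): cov1 * cov2 is generally not symmetric, but assuming cov1 and cov2 are symmetric positive semi-definite, cov1^{1/2} * cov2 * cov1^{1/2} is symmetric PSD with the same eigenvalues as cov1 * cov2, so its matrix square root is real and

\mathrm{tr}\big((\mathrm{cov1}^{1/2}\,\mathrm{cov2}\,\mathrm{cov1}^{1/2})^{1/2}\big) = \mathrm{tr}\big((\mathrm{cov1}\,\mathrm{cov2})^{1/2}\big)

This is the cross term \mathrm{Tr}((\Sigma_1\Sigma_2)^{1/2}) of the Fréchet distance between two Gaussians, d^2 = \|\mu_1-\mu_2\|^2 + \mathrm{Tr}(\Sigma_1 + \Sigma_2 - 2(\Sigma_1\Sigma_2)^{1/2}), the quantity the linked classifier_metrics_impl code is built around.
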
Example #7
Source File: math_func.py    From MMD-GAN with Apache License 2.0
def trace_sqrt_product_np(cov1, cov2):
    """ This function calculates trace(sqrt(cov1 * cov2))

    This code is inspired from:
    https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

    :param cov1:
    :param cov2:
    :return:
    """
    sqrt_cov1 = sqrt_sym_mat_np(cov1)
    cov_121 = np.matmul(np.matmul(sqrt_cov1, cov2), sqrt_cov1)

    return np.trace(sqrt_sym_mat_np(cov_121)) 
Example #8
Source File: spin.py    From spectral_inference_networks with Apache License 2.0
def _objective(xx, obj):
  """Objective function as custom op so that we can overload gradients."""
  with tf.name_scope('objective'):
    chol = tf.cholesky(xx)
    choli = tf.linalg.inv(chol)

    rq = tf.matmul(choli, tf.matmul(obj, choli, transpose_b=True))
    eigval = tf.matrix_diag_part(rq)
    loss = tf.trace(rq)
    grad = functools.partial(_objective_grad, xx, obj)
  return (loss, eigval, chol), grad 
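
A brief note on what this objective computes (a reading of the code above, not of the full SpIN method): with the Cholesky factorization xx = L Lᵀ and choli = L⁻¹, the matrix rq = L⁻¹ · obj · L⁻ᵀ satisfies

\mathrm{tr}(rq) = \mathrm{tr}(\mathrm{obj}\cdot xx^{-1}),

so the loss is the sum of the generalized eigenvalues of the pair (obj, xx), and the diagonal of rq is returned as the per-eigenvalue estimates (eigval). The grad returned via functools.partial is what lets the surrounding code overload how this trace is differentiated, as the docstring says.
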
Example #9
Source File: pointfly.py    From ldgcnn with MIT License
def compute_eigenvals(A):
    A_11 = A[:, :, 0, 0]  # (N, P)
    A_12 = A[:, :, 0, 1]
    A_13 = A[:, :, 0, 2]
    A_22 = A[:, :, 1, 1]
    A_23 = A[:, :, 1, 2]
    A_33 = A[:, :, 2, 2]
    I = tf.eye(3)
    p1 = tf.square(A_12) + tf.square(A_13) + tf.square(A_23)  # (N, P)
    q = tf.trace(A) / 3  # (N, P)
    p2 = tf.square(A_11 - q) + tf.square(A_22 - q) + tf.square(A_33 - q) + 2 * p1  # (N, P)
    p = tf.sqrt(p2 / 6) + 1e-8  # (N, P)
    N = tf.shape(A)[0]
    q_4d = tf.reshape(q, (N, -1, 1, 1))  # (N, P, 1, 1)
    p_4d = tf.reshape(p, (N, -1, 1, 1))
    B = (1 / p_4d) * (A - q_4d * I)  # (N, P, 3, 3)
    r = tf.clip_by_value(compute_determinant(B) / 2, -1, 1)  # (N, P)
    phi = tf.acos(r) / 3  # (N, P)
    eig1 = q + 2 * p * tf.cos(phi)  # (N, P)
    eig3 = q + 2 * p * tf.cos(phi + (2 * math.pi / 3))
    eig2 = 3 * q - eig1 - eig3
    return tf.abs(tf.stack([eig1, eig2, eig3], axis=2))  # (N, P, 3)


# P shape is (N, P, 3), N shape is (N, P, K, 3)
# return shape is (N, P) 
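
compute_eigenvals above (and its identical copy in Example #11) evaluates the eigenvalues of a batch of symmetric 3x3 matrices in closed form, using the standard trigonometric solution of the characteristic cubic rather than an iterative solver:

q = \mathrm{tr}(A)/3, \quad p = \sqrt{\tfrac{(A_{11}-q)^2 + (A_{22}-q)^2 + (A_{33}-q)^2 + 2(A_{12}^2 + A_{13}^2 + A_{23}^2)}{6}}, \quad B = (A - qI)/p,
\lambda_1 = q + 2p\cos\phi, \quad \lambda_3 = q + 2p\cos(\phi + 2\pi/3), \quad \lambda_2 = 3q - \lambda_1 - \lambda_3, \quad \text{where } \phi = \tfrac{1}{3}\arccos(\det(B)/2).

Here tf.trace supplies q per matrix, \lambda_2 follows from tr(A) = \lambda_1 + \lambda_2 + \lambda_3 = 3q, the clip of det(B)/2 to [-1, 1] guards against round-off, and the 1e-8 added to p guards against (nearly) isotropic matrices where p = 0.
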
Example #10
Source File: weight_blocks.py    From noisy-K-FAC with Apache License 2.0
def _compute_pi_tracenorm(left_cov, right_cov):
    left_norm = tf.trace(left_cov) * right_cov.shape.as_list()[0]
    right_norm = tf.trace(right_cov) * left_cov.shape.as_list()[0]
    return tf.sqrt(left_norm / right_norm) 
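
Sketched as a formula (assuming left_cov and right_cov are the two Kronecker factors A and B of a K-FAC block, which is how the surrounding noisy-K-FAC code uses them), the returned value is the trace-norm pi used to split damping between the factors:

\pi = \sqrt{\frac{\mathrm{tr}(A)/\dim A}{\mathrm{tr}(B)/\dim B}}

This equals sqrt(left_norm / right_norm) above because left_norm = tr(A)·dim(B) and right_norm = tr(B)·dim(A), so the ratio reduces to the ratio of the factors' average eigenvalues.
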
Example #11
Source File: pointfly.py    From scanobjectnn with MIT License
def compute_eigenvals(A):
    A_11 = A[:, :, 0, 0]  # (N, P)
    A_12 = A[:, :, 0, 1]
    A_13 = A[:, :, 0, 2]
    A_22 = A[:, :, 1, 1]
    A_23 = A[:, :, 1, 2]
    A_33 = A[:, :, 2, 2]
    I = tf.eye(3)
    p1 = tf.square(A_12) + tf.square(A_13) + tf.square(A_23)  # (N, P)
    q = tf.trace(A) / 3  # (N, P)
    p2 = tf.square(A_11 - q) + tf.square(A_22 - q) + tf.square(A_33 - q) + 2 * p1  # (N, P)
    p = tf.sqrt(p2 / 6) + 1e-8  # (N, P)
    N = tf.shape(A)[0]
    q_4d = tf.reshape(q, (N, -1, 1, 1))  # (N, P, 1, 1)
    p_4d = tf.reshape(p, (N, -1, 1, 1))
    B = (1 / p_4d) * (A - q_4d * I)  # (N, P, 3, 3)
    r = tf.clip_by_value(compute_determinant(B) / 2, -1, 1)  # (N, P)
    phi = tf.acos(r) / 3  # (N, P)
    eig1 = q + 2 * p * tf.cos(phi)  # (N, P)
    eig3 = q + 2 * p * tf.cos(phi + (2 * math.pi / 3))
    eig2 = 3 * q - eig1 - eig3
    return tf.abs(tf.stack([eig1, eig2, eig3], axis=2))  # (N, P, 3)


# P shape is (N, P, 3), N shape is (N, P, K, 3)
# return shape is (N, P) 
Example #12
Source File: rgbd.py    From MvDSCN with MIT License
def HSIC(self, c_v, c_w):
        N = tf.shape(c_v)[0]
        H = tf.ones((N, N)) * tf.cast((1/N), tf.float32) * (-1) + tf.eye(N)
        K_1 = tf.matmul(c_v, tf.transpose(c_v))
        K_2 = tf.matmul(c_w, tf.transpose(c_w))
        rst = tf.matmul(K_1, H)
        rst = tf.matmul(rst, K_2)
        rst = tf.matmul(rst, H)
        rst = tf.trace(rst)
        return rst 
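
The method returns the (unnormalized) Hilbert-Schmidt Independence Criterion between the two coefficient matrices with linear kernels: K_1 = c_v c_vᵀ, K_2 = c_w c_wᵀ, and H = I - (1/N)·11ᵀ is the centering matrix built on the line above, so

\mathrm{HSIC}(c_v, c_w) = \mathrm{tr}(K_1 H K_2 H)

up to the usual 1/(N-1)^2 normalization of the empirical HSIC estimator. Because H is idempotent, this is the Frobenius inner product of the two centered Gram matrices: it is zero when they are orthogonal and grows with the dependence between the representations.
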
Example #13
Source File: symmetry_qs.py    From elbow with BSD 3-Clause "New" or "Revised" License
def _logp(self, result, mean, std, **kwargs):

        n, k = self.shape
        base_logp = tf.reduce_sum(util.dists.gaussian_log_density(result, mean=mean, stddev=std))
        cxu = tf.matmul(tf.transpose(result/std), mean/std)
        svs = tf.sqrt(util.differentiable_sq_singular_vals(cxu))    
        lb = lpbessel_svs(svs, k)
        lp = base_logp + lb - tf.trace(cxu)
        
        return lp 
Example #14
Source File: refiner.py    From motion_reconstruction with BSD 3-Clause "New" or "Revised" License
def init_pose(pred_Rs, init_pose, weights=None):
    """
    Should stay close to initial weights
    pred_Rs is N x 24 x 3 x 3
    init_pose is 72D, need to convert to Rodrigues
    """
    init_Rs = batch_rodrigues(tf.reshape(init_pose, [-1, 3]))
    init_Rs = tf.reshape(init_Rs, [-1, 24, 3, 3])
    RRt = tf.matmul(init_Rs, pred_Rs, transpose_b=True)
    costheta = (tf.trace(RRt) - 1) / 2.
    target = tf.ones_like(costheta)
    if weights is None:
        weights = tf.ones_like(costheta)
    return tf.losses.mean_squared_error(target, costheta, weights=weights) 
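
Both this loss and pose_smoothness in Example #15 rely on the same identity: a 3x3 rotation matrix R by angle θ satisfies tr(R) = 1 + 2 cos θ, so for two rotations R_a and R_b the angle of the relative rotation obeys

\cos\theta = \frac{\mathrm{tr}(R_a R_b^\top) - 1}{2}.

Driving this value toward a target of 1 with a squared error drives the relative rotation toward the identity, while sidestepping the acos whose gradient blows up as θ → 0 (the "this nans" issue noted in the next example's docstring). Since RRt is N x 24 x 3 x 3, tf.trace reduces only the last two axes and the loss is averaged over all N x 24 joint rotations.
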
Example #15
Source File: refiner.py    From motion_reconstruction with BSD 3-Clause "New" or "Revised" License
def pose_smoothness(poses, global_only=False):
    """
    # Poses is F x 24 x 3 x 3
    Computes \sum ||p_i - p_{i+1}||
    On the pose in Rotation matrices space.
    It computes the angle between the two rotations:
    (tr(R) - 1) / 2 = cos(theta)
    So penalize acos((tr(R) - 1) / 2) --> this nans
    So:
    minimize: (1 - tr(R_1*R_2')) / 2 = -cos(theta) of R_1*R_2'
    min at -1.
    """
    # These are F-1 x 24 x 3 x 3 (Ok this is exactly the same..)
    curr_pose = poses[:-1]
    next_pose = poses[1:]
    RRt = tf.matmul(curr_pose, next_pose, transpose_b=True)

    # For min (1-tr(RR_T)) / 2
    costheta = (tf.trace(RRt) - 1) / 2.
    target = tf.ones_like(costheta)
    if global_only:
        print('Pose smoothness increased on global!')
        weights_global = 10 * tf.expand_dims(tf.ones_like(costheta[:, 0]), 1)
        weights_joints = tf.ones_like(costheta[:, 1:])
        weights = tf.concat([weights_global, weights_joints], 1)
    else:
        weights = tf.ones_like(costheta)
    return tf.losses.mean_squared_error(target, costheta, weights=weights) 
Example #16
Source File: tensorflow.py    From deepx with MIT License
def block_trace(self, X, m, n):
        blocks = []
        for i in range(n):
            blocks.append([])
            for j in range(n):
                block = self.trace(X[..., i*m:(i+1)*m, j*m:(j+1)*m])
                blocks[-1].append(block)
        return self.pack([
            self.pack([
                b for b in block
            ])
            for block in blocks
        ]) 
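
block_trace treats the last two axes of X as an n x n grid of m x m blocks and returns the n x n matrix of per-block traces. A rough standalone equivalent in plain TensorFlow 1.x, written as a sketch for a single 2-D matrix (the reshape/transpose trick and the names below are assumptions for illustration, not part of the deepx backend API):

import tensorflow as tf

def block_trace_2d(X, m, n):
    # View the (n*m, n*m) matrix as an (n, n) grid of (m, m) blocks ...
    blocks = tf.reshape(X, [n, m, n, m])         # axes: [block_row, i, block_col, j]
    blocks = tf.transpose(blocks, [0, 2, 1, 3])  # axes: [block_row, block_col, i, j]
    # ... and take the trace of each block over the two innermost axes.
    return tf.trace(blocks)                      # shape (n, n)

X = tf.reshape(tf.range(36, dtype=tf.float32), [6, 6])
with tf.Session() as sess:
    print(sess.run(block_trace_2d(X, m=3, n=2)))  # 2 x 2 matrix of block traces
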
Example #17
Source File: tensorflow.py    From deepx with MIT License
def trace(self, a):
        return tf.trace(a) 
Example #18
Source File: trace_op_test.py    From deep_image_model with Apache License 2.0
def compare(self, x):
    np_ans = np.trace(x, axis1=-2, axis2=-1)
    with self.test_session(use_gpu=True):
      tf_ans = tf.trace(x).eval()
    self.assertAllClose(tf_ans, np_ans) 
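
The NumPy reference call pins down the batching behaviour being tested: axis1=-2, axis2=-1 take the trace over the two innermost axes, one value per leading batch entry, which is exactly what tf.trace does for inputs of rank greater than 2. A NumPy-only illustration (shapes chosen arbitrarily):

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)  # a batch of two 3x4 matrices
print(np.trace(x, axis1=-2, axis2=-1))                # shape (2,): one trace per matrix
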
Example #19
Source File: ops.py    From tfdeploy with MIT License
def test_trace(self):
        t = tf.trace(self.random(3, 3))
        self.check(t) 
Example #20
Source File: batch_lbs.py    From motion_reconstruction with BSD 3-Clause "New" or "Revised" License
def batch_rot2aa(Rs):
    """
    Rs is B x 3 x 3
    void cMathUtil::RotMatToAxisAngle(const tMatrix& mat, tVector& out_axis, double& out_theta)
    {
        double c = 0.5 * (mat(0, 0) + mat(1, 1) + mat(2, 2) - 1);
        c = cMathUtil::Clamp(c, -1.0, 1.0);

        out_theta = std::acos(c);

        if (std::abs(out_theta) < 0.00001)
        {
            out_axis = tVector(0, 0, 1, 0);
        }
        else
        {
            double m21 = mat(2, 1) - mat(1, 2);
            double m02 = mat(0, 2) - mat(2, 0);
            double m10 = mat(1, 0) - mat(0, 1);
            double denom = std::sqrt(m21 * m21 + m02 * m02 + m10 * m10);
            out_axis[0] = m21 / denom;
            out_axis[1] = m02 / denom;
            out_axis[2] = m10 / denom;
            out_axis[3] = 0;
        }
    }
    """
    cos = 0.5 * (tf.trace(Rs) - 1)
    cos = tf.clip_by_value(cos, -1, 1)

    theta = tf.acos(cos)

    m21 = Rs[:, 2, 1] - Rs[:, 1, 2]
    m02 = Rs[:, 0, 2] - Rs[:, 2, 0]
    m10 = Rs[:, 1, 0] - Rs[:, 0, 1]
    denom = tf.sqrt(m21 * m21 + m02 * m02 + m10 * m10)

    axis0 = tf.where(tf.abs(theta) < 0.00001, m21, m21 / denom)
    axis1 = tf.where(tf.abs(theta) < 0.00001, m02, m02 / denom)
    axis2 = tf.where(tf.abs(theta) < 0.00001, m10, m10 / denom)

    return tf.expand_dims(theta, 1) * tf.stack([axis0, axis1, axis2], 1)

    # def get_aa(Rs, theta):
    #     m21 = Rs[:, 2, 1] - Rs[:, 1, 2]
    #     m02 = Rs[:, 0, 2] - Rs[:, 2, 0]
    #     m10 = Rs[:, 1, 0] - Rs[:, 0, 1]
    #     denom = tf.sqrt(m21 * m21 + m02 * m02 + m10 * m10)

    #     axis0 = m21 / denom
    #     axis1 = m02 / denom
    #     axis2 = m10 / denom

    #     return tf.expand_dims(theta, 1) * tf.stack([axis0, axis1, axis2], 1)

    # def get_identity(theta):
    #     return tf.zeros([theta.shape.as_list()[0], 1], dtype=tf.float32)

    # axis_angle = tf.where(tf.abs(theta) < 0.00001, get_aa(Rs, theta), get_identity(theta))

    # return axis_angle 
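
The axis recovery in batch_rot2aa comes from the antisymmetric part of the rotation: for a rotation R by angle θ about a unit axis n,

R - R^\top = 2\sin\theta\,[n]_\times, \qquad (R_{32}-R_{23},\; R_{13}-R_{31},\; R_{21}-R_{12}) = 2\sin\theta \cdot n,

which is exactly the vector (m21, m02, m10) in the code. Its norm denom equals 2|sin θ|, dividing by it recovers the unit axis, and scaling by θ = acos((tr(R) - 1)/2) gives the axis-angle vector. The tf.where guards keep the unscaled numerators when θ ≈ 0, where denom is near zero and the axis is ill-defined anyway (the same function appears again in Examples #21 and #22).
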
Example #21
Source File: batch_lbs.py    From human_dynamics with BSD 2-Clause "Simplified" License
def batch_rot2aa(Rs):
    """
    Rs is B x 3 x 3
    void cMathUtil::RotMatToAxisAngle(const tMatrix& mat, tVector& out_axis,
                                      double& out_theta)
    {
        double c = 0.5 * (mat(0, 0) + mat(1, 1) + mat(2, 2) - 1);
        c = cMathUtil::Clamp(c, -1.0, 1.0);

        out_theta = std::acos(c);

        if (std::abs(out_theta) < 0.00001)
        {
            out_axis = tVector(0, 0, 1, 0);
        }
        else
        {
            double m21 = mat(2, 1) - mat(1, 2);
            double m02 = mat(0, 2) - mat(2, 0);
            double m10 = mat(1, 0) - mat(0, 1);
            double denom = std::sqrt(m21 * m21 + m02 * m02 + m10 * m10);
            out_axis[0] = m21 / denom;
            out_axis[1] = m02 / denom;
            out_axis[2] = m10 / denom;
            out_axis[3] = 0;
        }
    }
    """
    cos = 0.5 * (tf.trace(Rs) - 1)
    cos = tf.clip_by_value(cos, -1, 1)

    theta = tf.acos(cos)

    m21 = Rs[:, 2, 1] - Rs[:, 1, 2]
    m02 = Rs[:, 0, 2] - Rs[:, 2, 0]
    m10 = Rs[:, 1, 0] - Rs[:, 0, 1]
    denom = tf.sqrt(m21 * m21 + m02 * m02 + m10 * m10)

    axis0 = tf.where(tf.abs(theta) < 0.00001, m21, m21 / denom)
    axis1 = tf.where(tf.abs(theta) < 0.00001, m02, m02 / denom)
    axis2 = tf.where(tf.abs(theta) < 0.00001, m10, m10 / denom)

    return tf.expand_dims(theta, 1) * tf.stack([axis0, axis1, axis2], 1) 
Example #22
Source File: batch_lbs.py    From phd with BSD 2-Clause "Simplified" License
def batch_rot2aa(Rs):
    """
    Rs is B x 3 x 3
    void cMathUtil::RotMatToAxisAngle(const tMatrix& mat, tVector& out_axis,
                                      double& out_theta)
    {
        double c = 0.5 * (mat(0, 0) + mat(1, 1) + mat(2, 2) - 1);
        c = cMathUtil::Clamp(c, -1.0, 1.0);

        out_theta = std::acos(c);

        if (std::abs(out_theta) < 0.00001)
        {
            out_axis = tVector(0, 0, 1, 0);
        }
        else
        {
            double m21 = mat(2, 1) - mat(1, 2);
            double m02 = mat(0, 2) - mat(2, 0);
            double m10 = mat(1, 0) - mat(0, 1);
            double denom = std::sqrt(m21 * m21 + m02 * m02 + m10 * m10);
            out_axis[0] = m21 / denom;
            out_axis[1] = m02 / denom;
            out_axis[2] = m10 / denom;
            out_axis[3] = 0;
        }
    }
    """
    cos = 0.5 * (tf.trace(Rs) - 1)
    cos = tf.clip_by_value(cos, -1, 1)

    theta = tf.acos(cos)

    m21 = Rs[:, 2, 1] - Rs[:, 1, 2]
    m02 = Rs[:, 0, 2] - Rs[:, 2, 0]
    m10 = Rs[:, 1, 0] - Rs[:, 0, 1]
    denom = tf.sqrt(m21 * m21 + m02 * m02 + m10 * m10)

    axis0 = tf.where(tf.abs(theta) < 0.00001, m21, m21 / denom)
    axis1 = tf.where(tf.abs(theta) < 0.00001, m02, m02 / denom)
    axis2 = tf.where(tf.abs(theta) < 0.00001, m10, m10 / denom)

    return tf.expand_dims(theta, 1) * tf.stack([axis0, axis1, axis2], 1)