Python tensorflow.diag() Examples

The following are 30 code examples of tensorflow.diag(), drawn from open-source projects. The source file, project, and license for each example are noted above it.
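tf.diag builds a matrix with the given values on the main diagonal and zeros elsewhere; tf.diag_part extracts the diagonal back out. Note that tf.diag was removed in TensorFlow 2.x, where the equivalent ops are tf.linalg.diag and tf.linalg.diag_part. A minimal sketch of the behavior, in the TF 1.x session style used throughout these examples:

import tensorflow as tf

x = tf.constant([1., 2., 3.])
d = tf.diag(x)           # [[1,0,0],[0,2,0],[0,0,3]]
back = tf.diag_part(d)   # [1., 2., 3.]

with tf.Session() as sess:
    print(sess.run(d))
    print(sess.run(back))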
Example #1
Source File: e2c_seq.py    From e2c-pytorch with Apache License 2.0
def transition(h,share=None):
  # compute A,B,o linearization matrices
  with tf.variable_scope("trans",reuse=share):
    for l in range(2):
      h=ReLU(h,100,"aggregate_loss"+str(l))
    with tf.variable_scope("A"):
      v,r=tf.split(1,2,linear(h,z_dim*2))
      v1=tf.expand_dims(v,-1) # (batch, z_dim, 1)
      rT=tf.expand_dims(r,1) # batch, 1, z_dim
      I=tf.diag([1.]*z_dim)
      A=(I+tf.batch_matmul(v1,rT)) # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
    with tf.variable_scope("B"):
      B=linear(h,z_dim*u_dim)
      B=tf.reshape(B,[-1,z_dim,u_dim])
    with tf.variable_scope("o"):
      o=linear(h,z_dim)
    return A,B,o,v,r 
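In this transition network, A is a rank-one perturbation of the identity, A = I + v rᵀ; tf.diag([1.]*z_dim) just materializes the (z_dim, z_dim) identity, which then broadcasts against the batched outer product. A NumPy sketch of the same construction (toy sizes, not from the original project):

import numpy as np

batch, z_dim = 4, 3
v1 = np.random.randn(batch, z_dim, 1)   # column vectors v
rT = np.random.randn(batch, 1, z_dim)   # row vectors r^T
I = np.diag(np.ones(z_dim))             # counterpart of tf.diag([1.]*z_dim)
A = I + v1 @ rT                         # I broadcasts over the batch dimension
assert A.shape == (batch, z_dim, z_dim)

In current TensorFlow the identity would more idiomatically come from tf.eye(z_dim), and tf.batch_matmul has since been folded into tf.matmul.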
Example #2
Source File: hawkes_cumulant_matching.py    From tick with BSD 3-Clause "New" or "Revised" License
def starting_point(self, random=False):
        """Heuristic to find a starting point candidate

        Parameters
        ----------
        random : `bool`
            Use a random orthogonal matrix instead of identity

        Returns
        -------
        starting_point : `np.ndarray`, shape=(n_nodes, n_nodes)
            A starting point candidate
        """
        sqrt_C = sqrtm(self.covariance)
        sqrt_L = np.sqrt(self.mean_intensity)
        if random:
            random_matrix = np.random.rand(self.n_nodes, self.n_nodes)
            M, _ = qr(random_matrix)
        else:
            M = np.eye(self.n_nodes)
        initial = np.dot(np.dot(sqrt_C, M), np.diag(1. / sqrt_L))
        return initial 
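Right-multiplying by np.diag(1. / sqrt_L) scales each column of sqrt_C @ M by the corresponding 1/√L_i. A sketch with stand-in values (the real covariance and mean_intensity come from the fitted model):

import numpy as np
from scipy.linalg import sqrtm

n = 3
C = 2.0 * np.eye(n)              # stand-in for self.covariance
L = 4.0 * np.ones(n)             # stand-in for self.mean_intensity
M = np.eye(n)
initial = sqrtm(C) @ M @ np.diag(1.0 / np.sqrt(L))
# Equivalent without forming the diagonal matrix: broadcasting scales columns.
assert np.allclose(initial, (sqrtm(C) @ M) / np.sqrt(L))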
Example #3
Source File: linear_regression.py    From Gun-Detector with Apache License 2.0
def solve_ridge(x, y, ridge_factor):
  with tf.name_scope("solve_ridge"):
    # Append a column of ones to the feature matrix for the bias term
    A = tf.concat([x, tf.ones((x.shape.as_list()[0], 1))], axis=1)

    # Analytic solution for the ridge regression loss
    inv_target = tf.matmul(A, A, transpose_a=True)
    np_diag_penalty = ridge_factor * np.ones(
        A.shape.as_list()[1], dtype="float32")
    # Remove penalty on bias component of weights
    np_diag_penalty[-1] = 0.
    diag_penalty = tf.constant(np_diag_penalty)
    inv_target += tf.diag(diag_penalty)

    inv = tf.matrix_inverse(inv_target)
    w = tf.matmul(inv, tf.matmul(A, y, transpose_a=True))
    return w 
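The closed form being computed is w = (AᵀA + diag(penalty))⁻¹ Aᵀy, i.e. ridge regression with the penalty zeroed on the bias column. A NumPy sketch of the same solve (random toy data; np.linalg.solve replaces the explicit inverse):

import numpy as np

rng = np.random.default_rng(0)
n, d = 50, 3
x = rng.normal(size=(n, d))
y = rng.normal(size=(n, 1))
ridge_factor = 0.1

A = np.hstack([x, np.ones((n, 1))])            # append bias column
penalty = ridge_factor * np.ones(A.shape[1])
penalty[-1] = 0.0                              # no penalty on the bias weight
w = np.linalg.solve(A.T @ A + np.diag(penalty), A.T @ y)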
Example #4
Source File: crf_as_rnn_keras_layer.py    From CRFasRNNLayer with MIT License
def build(self, input_shape):
        self.spatial_ker_weights = self.add_weight(name='spatial_ker_weights',
                                                   shape=(self.num_classes,),
                                                   initializer=tf.initializers.truncated_normal(mean=0, stddev=0.1),
                                                   trainable=True)

        self.spatial_ker_weights = tf.diag(self.spatial_ker_weights)

        self.bilateral_ker_weights = self.add_weight(name='bilateral_ker_weights',
                                                     shape=(self.num_classes,),
                                                     initializer=tf.initializers.truncated_normal(mean=0, stddev=0.1),
                                                     trainable=True)
        self.bilateral_ker_weights = tf.diag(self.bilateral_ker_weights)

        self.compatibility_matrix = self.add_weight(name='compatibility_matrix',
                                                    shape=(self.num_classes, self.num_classes),
                                                    initializer=tf.initializers.truncated_normal(mean=0, stddev=0.1),
                                                    trainable=True)

        super(CRF_RNN_Layer, self).build(input_shape) 
Example #5
Source File: tfops.py    From glow with MIT License
def _symmetric_matrix_square_root(mat, eps=1e-10):
    """Compute square root of a symmetric matrix.
    Note that this is different from an elementwise square root. We want to
    compute M' where M' = sqrt(mat) such that M' * M' = mat.
    Also note that this method **only** works for symmetric matrices.
    Args:
      mat: Matrix to take the square root of.
      eps: Small epsilon such that any element less than eps will not be square
        rooted to guard against numerical instability.
    Returns:
      Matrix square root of mat.
    """
    # Unlike numpy, tensorflow's return order is (s, u, v)
    s, u, v = tf.svd(mat)
    # sqrt is unstable around 0; below eps, keep s itself (already ~0) instead of sqrt(s)
    si = tf.where(tf.less(s, eps), s, tf.sqrt(s))
    # Note that the v returned by Tensorflow is v = V
    # (when referencing the equation A = U S V^T)
    # This is unlike Numpy which returns v = V^T
    return tf.matmul(
        tf.matmul(u, tf.diag(si)), v, transpose_b=True) 
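For a symmetric positive semi-definite matrix the SVD coincides with the eigendecomposition (U = V), so U·diag(√s)·Vᵀ squares back to the input. A NumPy check of the same construction; note that np.linalg.svd returns Vᵀ where tf.svd returns V:

import numpy as np

rng = np.random.default_rng(0)
B = rng.normal(size=(4, 4))
mat = B @ B.T                       # symmetric PSD test matrix
u, s, vt = np.linalg.svd(mat)       # NumPy already returns v^T
sqrt_mat = u @ np.diag(np.sqrt(s)) @ vt
assert np.allclose(sqrt_mat @ sqrt_mat, mat)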
Example #6
Source File: gmm_ops.py    From deep_image_model with Apache License 2.0
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. If diag is True, only the
    diagonal of the covariance matrix is returned.
  """
  num_points = tf.to_float(tf.shape(x)[0])
  x -= tf.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = tf.reduce_sum(
        tf.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = tf.matmul(x, x, transpose_a=True)  / (num_points - 1)
  return cov 
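The full branch is the usual sample covariance xᵀx/(n−1) after mean-centering, which agrees with np.cov on row-major data; the diag branch keeps only the per-feature variances. A NumPy sketch:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 5))
xc = x - x.mean(axis=0, keepdims=True)            # center
cov_full = xc.T @ xc / (x.shape[0] - 1)
assert np.allclose(cov_full, np.cov(x, rowvar=False))
cov_diag = (xc ** 2).sum(axis=0) / (x.shape[0] - 1)
assert np.allclose(cov_diag, np.diag(cov_full))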
Example #7
Source File: vae.py    From disentanglement_lib with Apache License 2.0
def regularize_diag_off_diag_dip(covariance_matrix, lambda_od, lambda_d):
  """Compute on and off diagonal regularizers for DIP-VAE models.

  Penalize deviations of covariance_matrix from the identity matrix. Uses
  different weights for the deviations of the diagonal and off diagonal entries.

  Args:
    covariance_matrix: Tensor of size [num_latent, num_latent] to regularize.
    lambda_od: Weight of penalty for off diagonal elements.
    lambda_d: Weight of penalty for diagonal elements.

  Returns:
    dip_regularizer: Regularized deviation from diagonal of covariance_matrix.
  """
  covariance_matrix_diagonal = tf.diag_part(covariance_matrix)
  covariance_matrix_off_diagonal = covariance_matrix - tf.diag(
      covariance_matrix_diagonal)
  dip_regularizer = tf.add(
      lambda_od * tf.reduce_sum(covariance_matrix_off_diagonal**2),
      lambda_d * tf.reduce_sum((covariance_matrix_diagonal - 1)**2))
  return dip_regularizer 
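tf.diag_part and tf.diag split the covariance into its diagonal and off-diagonal parts so the two deviations from the identity can be weighted separately: the regularizer is lambda_od·Σ_{i≠j} C_ij² + lambda_d·Σ_i (C_ii − 1)². A small numeric sketch of the decomposition:

import numpy as np

C = np.array([[1.2, 0.3],
              [0.3, 0.8]])
lambda_od, lambda_d = 1.0, 10.0
d = np.diag(C)                  # counterpart of tf.diag_part
off = C - np.diag(d)            # counterpart of subtracting tf.diag(d)
reg = lambda_od * (off ** 2).sum() + lambda_d * ((d - 1.0) ** 2).sum()
assert np.isclose(reg, 1.0 * 0.18 + 10.0 * 0.08)   # = 0.98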
Example #8
Source File: e2c_plane.py    From e2c-pytorch with Apache License 2.0
def transition(h):
    # compute A,B,o linearization matrices
    with tf.variable_scope("trans"):
        for l in range(2):
            h = ReLU(h, 100, "aggregate_loss" + str(l))
        with tf.variable_scope("A"):
            v, r = tf.split(1, 2, linear(h, z_dim * 2))
            v1 = tf.expand_dims(v, -1)  # (batch, z_dim, 1)
            rT = tf.expand_dims(r, 1)  # batch, 1, z_dim
            I = tf.diag([1.] * z_dim)
            A = (
                I + tf.batch_matmul(v1, rT)
            )  # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
        with tf.variable_scope("B"):
            B = linear(h, z_dim * u_dim)
            B = tf.reshape(B, [-1, z_dim, u_dim])
        with tf.variable_scope("o"):
            o = linear(h, z_dim)
        return A, B, o, v, r 
Example #9
Source File: e2c_plane.py    From e2c with Apache License 2.0
def transition(h):
  # compute A,B,o linearization matrices
  with tf.variable_scope("trans"):
    for l in range(2):
      h=ReLU(h,100,"l"+str(l))
    with tf.variable_scope("A"):
      v,r=tf.split(1,2,linear(h,z_dim*2))
      v1=tf.expand_dims(v,-1) # (batch, z_dim, 1)
      rT=tf.expand_dims(r,1) # batch, 1, z_dim
      I=tf.diag([1.]*z_dim)
      A=(I+tf.batch_matmul(v1,rT)) # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
    with tf.variable_scope("B"):
      B=linear(h,z_dim*u_dim)
      B=tf.reshape(B,[-1,z_dim,u_dim])
    with tf.variable_scope("o"):
      o=linear(h,z_dim)
    return A,B,o,v,r 
Example #10
Source File: e2c_seq.py    From e2c with Apache License 2.0
def transition(h,share=None):
  # compute A,B,o linearization matrices
  with tf.variable_scope("trans",reuse=share):
    for l in range(2):
      h=ReLU(h,100,"l"+str(l))
    with tf.variable_scope("A"):
      v,r=tf.split(1,2,linear(h,z_dim*2))
      v1=tf.expand_dims(v,-1) # (batch, z_dim, 1)
      rT=tf.expand_dims(r,1) # batch, 1, z_dim
      I=tf.diag([1.]*z_dim)
      A=(I+tf.batch_matmul(v1,rT)) # (z_dim, z_dim) + (batch, z_dim, 1)*(batch, 1, z_dim) (I is broadcasted) 
    with tf.variable_scope("B"):
      B=linear(h,z_dim*u_dim)
      B=tf.reshape(B,[-1,z_dim,u_dim])
    with tf.variable_scope("o"):
      o=linear(h,z_dim)
    return A,B,o,v,r 
Example #11
Source File: graph_builder.py    From Action_Recognition_Zoo with MIT License
def BatchedSparseToDense(sparse_indices, output_size):
  """Batch compatible sparse to dense conversion.

  This is useful for one-hot coded target labels.

  Args:
    sparse_indices: [batch_size] tensor containing one index per batch
    output_size: needed in order to generate the correct dense output

  Returns:
    A [batch_size, output_size] dense tensor.
  """
  eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
  return tf.nn.embedding_lookup(eye, sparse_indices) 
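Looking up rows of a diagonal identity matrix is exactly one-hot encoding; tf.one_hot(sparse_indices, output_size) would express the same thing directly. A NumPy sketch of the equivalence:

import numpy as np

output_size = 4
sparse_indices = np.array([2, 0, 3])
eye = np.diag(np.full(output_size, 1.0))   # counterpart of tf.diag(tf.fill(...))
dense = eye[sparse_indices]                # counterpart of tf.nn.embedding_lookup
assert np.array_equal(dense, np.eye(output_size)[sparse_indices])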
Example #12
Source File: pca.py    From elbow with BSD 3-Clause "New" or "Revised" License
def _logp(self, result, Z, mu, std):
        n, d_output = self.shape
        
        cov = tf.matmul(Z, tf.transpose(Z)) + tf.diag(tf.ones(n,)*std) 
        L = tf.cholesky(cov)
        r = result - mu
        out_cols = tf.unpack(r, axis=1)
        lps = [multivariate_gaussian_log_density(col, mu=0., L=L) for col in out_cols]
        return tf.reduce_sum(tf.pack(lps)) 
Example #13
Source File: pca.py    From elbow with BSD 3-Clause "New" or "Revised" License
def _entropy(self, Z, mu, std):
        n, d_output = self.shape
        cov = tf.matmul(Z, tf.transpose(Z)) + tf.diag(tf.ones(n,)*std) 
        L = tf.cholesky(cov)
        return d_output * multivariate_gaussian_entropy(L=L) 
Example #14
Source File: pca.py    From elbow with BSD 3-Clause "New" or "Revised" License
def _build_inverse_projection(self, X, W, mu, std):
        n, d_latent = self.shape
        variance = tf.square(std)

        M = tf.matmul(tf.transpose(W), W) + tf.diag(tf.ones((d_latent))*variance)
        L = tf.cholesky(M)
        
        # pred_Z = M^-1 W' r' as per (6) in tipping & bishop JRSS
        # https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/bishop-ppca-jrss.pdf
        r = X-mu
        WTr = tf.transpose(tf.matmul(r, W)) # W' (t - mu)  
        tmp = tf.matrix_triangular_solve(L, WTr, lower=True)
        pred_z = tf.transpose(tf.matrix_triangular_solve(tf.transpose(L), tmp, lower=False))        
        
        return pred_z, L, std 
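The two triangular solves apply M⁻¹ via its Cholesky factor: with M = WᵀW + σ²I = LLᵀ, solving L·y = Wᵀr and then Lᵀ·z = y yields z = M⁻¹Wᵀr, the PPCA posterior mean from Tipping & Bishop's equation (6). A NumPy sketch with hypothetical shapes:

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(0)
n, d_obs, d_latent = 20, 5, 2
W = rng.normal(size=(d_obs, d_latent))
r = rng.normal(size=(n, d_obs))               # plays the role of X - mu
variance = 0.1
M = W.T @ W + variance * np.eye(d_latent)
L = cholesky(M, lower=True)
WTr = (r @ W).T                               # W^T r^T, shape (d_latent, n)
tmp = solve_triangular(L, WTr, lower=True)
pred_z = solve_triangular(L.T, tmp, lower=False).T
assert np.allclose(pred_z, np.linalg.solve(M, WTr).T)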
Example #15
Source File: time_series.py    From elbow with BSD 3-Clause "New" or "Revised" License
def _sample_and_entropy(self, transition_matrices,
                            step_noise_means,
                            step_noise_covs,
                            unary_means,
                            unary_variances):

        T, d = self.shape
        
        upwards_means = tf.unpack(unary_means)
        upwards_vars = tf.unpack(unary_variances)
        unary_factors = [MVGaussianMeanCov(mean, tf.diag(vs)) for (mean, vs) in zip(upwards_means, upwards_vars)]

        # transition_matrices is either a d x d matrix, or a T x d x d tensor
        if len(transition_matrices.get_shape()) == 2:
            transition_matrices = [transition_matrices for i in range(T)]

        # step noise mean is either a (d,)-vector or a T x d matrix
        if len(step_noise_means.get_shape()) == 1:
            step_noise_means = [step_noise_means for i in range(T)]

        # step noise cov is either a d x d matrix or a T x d x d tensor
        if len(step_noise_covs.get_shape()) == 2:
            step_noise_covs = [step_noise_covs for i in range(T)]

        step_noise_factors = [MVGaussianMeanCov(step_noise_means[t], step_noise_covs[t]) for t in range(T)]
            
        back_filtered, logZ = self._pass_messages_backwards(transition_matrices,
                                                            step_noise_factors,
                                                            unary_factors)

        self._back_filtered = back_filtered
        self._logZ = logZ
        
        eps = tf.random_normal(shape=self.shape)
        sample, entropy = self._sample_forward(back_filtered, transition_matrices,
                                               step_noise_factors, eps)
        return sample, entropy 
Example #16
Source File: graph_builder.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def BatchedSparseToDense(sparse_indices, output_size):
  """Batch compatible sparse to dense conversion.

  This is useful for one-hot coded target labels.

  Args:
    sparse_indices: [batch_size] tensor containing one index per batch
    output_size: needed in order to generate the correct dense output

  Returns:
    A [batch_size, output_size] dense tensor.
  """
  eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
  return tf.nn.embedding_lookup(eye, sparse_indices) 
Example #17
Source File: attention.py    From DiSAN with Apache License 2.0
def self_align_attention(rep_tensor, mask, scope=None, simplify=True, hn=None):  # correct
    """
    attention strategy 4: self * self => attention self
    :param rep_tensor: rank is three [bs,sl,hn]
    :param mask: [bs,sl] tf.bool
    :param scope
    :param simplify:
    :return:  attended tensor [bs,sl,hn]
    """
    with tf.name_scope(scope or 'self_attention'):
        bs = tf.shape(rep_tensor)[0]
        sl = tf.shape(rep_tensor)[1]
        #vec = tf.shape(rep_tensor)[2]
        ivec = rep_tensor.get_shape().as_list()[-1]

        to_be_attended = tf.tile(tf.expand_dims(rep_tensor, 1), [1, sl, 1, 1])
        if not simplify:
            assert hn is not None
            rep_tensor = tf.nn.relu(linear([rep_tensor], hn, True, 0., 'linear_transform'))
        # 1. self alignment
        mask_tiled_sec = tf.tile(tf.expand_dims(mask, 1), [1, sl, 1])  # bs,sl,sl
        mask_tiled_mian = tf.tile(tf.expand_dims(mask, 2), [1, 1, sl])  # bs,sl,sl
        mask_tiled = tf.logical_and(mask_tiled_sec, mask_tiled_mian)
        input_sec = tf.tile(tf.expand_dims(rep_tensor, 1), [1, sl, 1, 1])  # bs,1-sl,sl,hn
        input_main = tf.tile(tf.expand_dims(rep_tensor, 2), [1, 1, sl, 1])  # bs,sl,1-sl,hn
        # self_alignment = tf.reduce_sum(input_sec * input_main, -1)  # bs,sl,sl
        self_alignment = (1.0 / ivec) * tf.reduce_sum(input_sec * input_main, -1)  # bs,sl,sl
        # 2. generate diag~/ mat
        # diag = tf.expand_dims(
        #     tf.cast(tf.logical_not(
        #         tf.cast(
        #             tf.diag(
        #                 tf.ones([sl], tf.int32)), tf.bool)
        #     ), tf.float32), 0)  # 1,sl,sl
        diag = tf.expand_dims(tf.logical_not(
                tf.cast(tf.diag(tf.ones([sl], tf.int32)), tf.bool)), 0)  # 1,sl,sl
        diag = tf.tile(diag, [bs, 1, 1])  # bs, sl, sl
        # self_alignment = self_alignment * diag  # bs,sl,sl
        # 3. attend data
        context = softsel(to_be_attended, self_alignment, tf.logical_and(mask_tiled, diag))  # [bs,sl,sl],  bs,sl,hn
        return context 
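tf.diag(tf.ones([sl], tf.int32)) builds an sl×sl identity, and the logical NOT of its boolean cast gives a mask that is False exactly on the diagonal, so no position attends to itself. A NumPy sketch of that mask:

import numpy as np

sl = 4
diag_mask = ~np.diag(np.ones(sl, dtype=np.int32)).astype(bool)
# False on the diagonal, True elsewhere; tiled over the batch before masking.
assert diag_mask[0, 1] and not diag_mask[0, 0]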
Example #18
Source File: graph_builder.py    From hands-detection with MIT License
def BatchedSparseToDense(sparse_indices, output_size):
  """Batch compatible sparse to dense conversion.

  This is useful for one-hot coded target labels.

  Args:
    sparse_indices: [batch_size] tensor containing one index per batch
    output_size: needed in order to generate the correct dense output

  Returns:
    A [batch_size, output_size] dense tensor.
  """
  eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
  return tf.nn.embedding_lookup(eye, sparse_indices) 
Example #19
Source File: graph_builder.py    From DOTA_models with Apache License 2.0
def BatchedSparseToDense(sparse_indices, output_size):
  """Batch compatible sparse to dense conversion.

  This is useful for one-hot coded target labels.

  Args:
    sparse_indices: [batch_size] tensor containing one index per batch
    output_size: needed in order to generate the correct dense output

  Returns:
    A [batch_size, output_size] dense tensor.
  """
  eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
  return tf.nn.embedding_lookup(eye, sparse_indices) 
Example #20
Source File: parameterization.py    From elbow with BSD 3-Clause "New" or "Revised" License
def psd_diagonal(shape=None, init=None, name=None):
    assert(init is None) # TODO figure out init semantics
    n, n2 = shape
    assert(n==n2)
    init = np.float32(np.zeros(n))
    
    latent_diag = tf.Variable(init, name="latent_"+name if name is not None else None)
    psd = tf.diag(tf.exp(latent_diag), name=name)
    return psd 
Example #21
Source File: tf_rmsd.py    From dl4chem-geometry with BSD 3-Clause "New" or "Revised" License
def tf_kabsch_rmsd_masked(P, Q, mask, tol):
    N = tf.reduce_sum(mask)
    mask_mat = tf.diag(tf.reshape(mask, (-1,)))
    P_masked = tf.matmul(mask_mat, P) + tol
    Q_masked = tf.matmul(mask_mat, Q) + tol
    P_transformed = tf_kabsch_rotate(P_masked, Q_masked)
    return tf_rmsd_masked(P_transformed, Q_masked, N) 
Example #22
Source File: matrix_structures.py    From VFF with Apache License 2.0
def get(self):
        return tf.diag(self.d) 
Example #23
Source File: matrix_structures.py    From VFF with Apache License 2.0
def get(self):
        W = tf.expand_dims(self.v, 1)
        return tf.diag(self.d) - tf.matmul(W, tf.transpose(W)) 
Example #24
Source File: matrix_structures.py    From VFF with Apache License 2.0
def __init__(self, d, v):
        """
        A matrix of the form

            diag(d) - v v^T

        (note the minus sign)
        """
        self.d = d
        self.v = v 
Example #25
Source File: matrix_structures.py    From VFF with Apache License 2.0
def get(self):
        V = tf.expand_dims(self.v, 1)
        return tf.diag(self.d) + tf.matmul(V, tf.transpose(V)) 
Example #26
Source File: matrix_structures.py    From VFF with Apache License 2.0
def get(self):
        return tf.diag(self.d) - tf.matmul(self.W, tf.transpose(self.W)) 
Example #27
Source File: matrix_structures.py    From VFF with Apache License 2.0
def __init__(self, d, W):
        """
        A matrix of the form

            diag(d) - W W^T

        (note the minus sign)
        """
        self.d = d
        self.W = W 
Example #28
Source File: matrix_structures.py    From VFF with Apache License 2.0
def get(self):
        return tf.diag(self.d) + tf.matmul(self.W, tf.transpose(self.W)) 
Example #29
Source File: matrix_structures.py    From VFF with Apache License 2.0
def __init__(self, d, W):
        """
        A matrix of the form

            diag(d) + W W^T

        """
        self.d = d
        self.W = W 
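These VFF helper classes represent structured matrices of the form diag(d) ± vvᵀ or diag(d) ± WWᵀ, only materializing them densely via tf.diag in get(). A NumPy sketch of the rank-k variants (toy sizes):

import numpy as np

rng = np.random.default_rng(0)
n, k = 5, 2
d = rng.uniform(1.0, 2.0, size=n)
W = rng.normal(size=(n, k))
dense_plus = np.diag(d) + W @ W.T     # counterpart of the diag(d) + W W^T get()
dense_minus = np.diag(d) - W @ W.T    # counterpart of the diag(d) - W W^T get()
assert np.allclose(dense_plus - dense_minus, 2 * W @ W.T)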
Example #30
Source File: vgp.py    From VFF with Apache License 2.0
def _build_KL(self):
        """
        We're working in a 'whitened' representation, so this is the KL between
        q(u) and N(0, 1)
        """
        Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
        Kim = Kuu.solve(self.q_mu)
        KL = 0.5*tf.squeeze(tf.matmul(tf.transpose(Kim), self.q_mu))  # Mahalanobis term
        KL += 0.5 * Kuu.trace_KiX(tf.diag(tf.square(tf.reshape(self.q_sqrt, [-1]))))
        KL += -0.5*tf.cast(tf.size(self.q_mu), float_type)  # Constant term.
        KL += -0.5*tf.reduce_sum(tf.log(tf.square(self.q_sqrt)))  # Log det Q
        KL += 0.5*Kuu.logdet()  # Log det P
        return KL
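As written, the terms correspond to the closed-form KL between a Gaussian q(u) = N(q_mu, S) with diagonal covariance S = diag(q_sqrt²) and a zero-mean Gaussian with covariance Kuu: KL = ½(mᵀK⁻¹m + tr(K⁻¹S) − d − log|S| + log|K|). A dense NumPy sketch of the same terms on a stand-in Kuu:

import numpy as np

rng = np.random.default_rng(0)
d = 3
A = rng.normal(size=(d, d))
K = A @ A.T + np.eye(d)                  # stand-in for a dense Kuu
m = rng.normal(size=(d, 1))              # q_mu
q_sqrt = rng.uniform(0.5, 1.5, size=d)
S = np.diag(q_sqrt ** 2)

Kinv = np.linalg.inv(K)
KL = 0.5 * (m.T @ Kinv @ m).item()       # Mahalanobis term
KL += 0.5 * np.trace(Kinv @ S)
KL += -0.5 * d                           # constant term
KL += -0.5 * np.log(q_sqrt ** 2).sum()   # log det S
KL += 0.5 * np.linalg.slogdet(K)[1]      # log det Kuu
assert KL >= 0.0                         # KL divergence is non-negative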