Python tensorflow.matrix_diag() Examples

The following are 28 code examples of tensorflow.matrix_diag(), drawn from open-source projects. Each example notes the project and source file it comes from. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
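For context, tf.matrix_diag builds a (batched) square matrix whose diagonal is the last dimension of its input; in TensorFlow 2.x the same op is available as tf.linalg.diag. A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf

v = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])  # shape (2, 3): a batch of two vectors
m = tf.matrix_diag(v)               # shape (2, 3, 3): two 3 x 3 diagonal matrices

with tf.Session() as sess:
    print(sess.run(m))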
Example #1
Source File: components.py    From strsum with Apache License 2.0
def get_matrix_tree(r, A):
    # Laplacian of the edge-score matrix: diag of column sums of A, minus A.
    L = tf.reduce_sum(A, 1)
    L = tf.matrix_diag(L)
    L = L - A

    # Add the root scores r on the diagonal.
    r_diag = tf.matrix_diag(r)
    LL = L + r_diag

    LL_inv = tf.matrix_inverse(LL)  # batch_l, doc_l, doc_l
    LL_inv_diag_ = tf.matrix_diag_part(LL_inv)

    # Root marginals: r_j * [LL^-1]_jj (matrix-tree theorem).
    d0 = tf.multiply(r, LL_inv_diag_)

    LL_inv_diag = tf.expand_dims(LL_inv_diag_, 2)

    # Edge marginals: A_ij * [LL^-1]_jj - A_ij * [LL^-1]_ji.
    tmp1 = tf.multiply(A, tf.matrix_transpose(LL_inv_diag))
    tmp2 = tf.multiply(A, tf.matrix_transpose(LL_inv))

    d = tmp1 - tmp2
    d = tf.concat([tf.expand_dims(d0, [1]), d], 1)
    return d
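This appears to be the matrix-tree computation used for structured (tree) attention: r holds per-node root scores of shape [batch, n] and A holds pairwise edge scores of shape [batch, n, n]. A hypothetical call, assuming positive scores (names and shapes inferred from the code above, not taken from the original project):

import tensorflow as tf

batch, n = 2, 5
r = tf.exp(tf.random_normal([batch, n]))     # per-node root scores (positive)
A = tf.exp(tf.random_normal([batch, n, n]))  # pairwise edge scores (positive)
d = get_matrix_tree(r, A)                    # shape [batch, n + 1, n]

with tf.Session() as sess:
    print(sess.run(tf.shape(d)))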
Example #2
Source File: cls_model.py    From RGCNN with Apache License 2.0
def get_laplacian(self, adj_matrix, normalize=True):
        """Compute pairwise distance of a point cloud.

        Args:
            pairwise distance: tensor (batch_size, num_points, num_points)

        Returns:
            pairwise distance: (batch_size, num_points, num_points)
        """
        if normalize:
            D = tf.reduce_sum(adj_matrix, axis=1)  # (batch_size,num_points)
            eye = tf.ones_like(D)
            eye = tf.matrix_diag(eye)
            D = 1 / tf.sqrt(D)
            D = tf.matrix_diag(D)
            L = eye - tf.matmul(tf.matmul(D, adj_matrix), D)
        else:
            D = tf.reduce_sum(adj_matrix, axis=1)  # (batch_size,num_points)
            # eye = tf.ones_like(D)
            # eye = tf.matrix_diag(eye)
            # D = 1 / tf.sqrt(D)
            D = tf.matrix_diag(D)
            L = D - adj_matrix
        return L 
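For intuition, a minimal standalone sketch of the normalized branch above on a toy 3-node graph (TensorFlow 1.x assumed; the adjacency values are made up):

import tensorflow as tf

adj = tf.constant([[[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]]])                  # batch of one symmetric adjacency
deg = tf.reduce_sum(adj, axis=1)                     # node degrees, shape (1, 3)
d_inv_sqrt = tf.matrix_diag(1.0 / tf.sqrt(deg))      # D^(-1/2)
eye = tf.matrix_diag(tf.ones_like(deg))              # batched identity
laplacian = eye - tf.matmul(tf.matmul(d_inv_sqrt, adj), d_inv_sqrt)

with tf.Session() as sess:
    print(sess.run(laplacian))                       # I - D^(-1/2) A D^(-1/2)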
Example #3
Source File: seg_model.py    From RGCNN with Apache License 2.0
def get_laplacian(self, adj_matrix, normalize=True):
        """Compute pairwise distance of a point cloud.

        Args:
            pairwise distance: tensor (batch_size, num_points, num_points)

        Returns:
            pairwise distance: (batch_size, num_points, num_points)
        """
        if normalize:
            D = tf.reduce_sum(adj_matrix, axis=1)  # (batch_size,num_points)
            eye = tf.ones_like(D)
            eye = tf.matrix_diag(eye)
            D = 1 / tf.sqrt(D)
            D = tf.matrix_diag(D)
            L = eye - tf.matmul(tf.matmul(D, adj_matrix), D)
        else:
            D = tf.reduce_sum(adj_matrix, axis=1)  # (batch_size,num_points)
            # eye = tf.ones_like(D)
            # eye = tf.matrix_diag(eye)
            # D = 1 / tf.sqrt(D)
            D = tf.matrix_diag(D)
            L = D - adj_matrix
        return L 
Example #4
Source File: vae.py    From disentanglement_lib with Apache License 2.0
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
    cov_z_mean = compute_covariance_z_mean(z_mean)
    lambda_d = self.lambda_d_factor * self.lambda_od
    if self.dip_type == "i":  # Eq 6 page 4
      # mu = z_mean is [batch_size, num_latent]
      # Compute cov_p(x)[mu(x)] = E[mu mu^T] - E[mu] E[mu]^T
      cov_dip_regularizer = regularize_diag_off_diag_dip(
          cov_z_mean, self.lambda_od, lambda_d)
    elif self.dip_type == "ii":
      cov_enc = tf.matrix_diag(tf.exp(z_logvar))
      expectation_cov_enc = tf.reduce_mean(cov_enc, axis=0)
      cov_z = expectation_cov_enc + cov_z_mean
      cov_dip_regularizer = regularize_diag_off_diag_dip(
          cov_z, self.lambda_od, lambda_d)
    else:
      raise NotImplementedError("DIP variant not supported.")
    return kl_loss + cov_dip_regularizer 
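compute_covariance_z_mean and regularize_diag_off_diag_dip are defined elsewhere in disentanglement_lib and are not shown here. Purely as an illustration of the covariance formula referenced in the comment above (a sketch, not the library's implementation):

import tensorflow as tf

def covariance_of_means_sketch(z_mean):
    # z_mean is [batch_size, num_latent]; returns E[mu mu^T] - E[mu] E[mu]^T.
    batch_size = tf.cast(tf.shape(z_mean)[0], tf.float32)
    e_mu = tf.reduce_mean(z_mean, axis=0, keepdims=True)            # [1, num_latent]
    e_mu_mu_t = tf.matmul(z_mean, z_mean, transpose_a=True) / batch_size
    return e_mu_mu_t - tf.matmul(e_mu, e_mu, transpose_a=True)      # [num_latent, num_latent]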
Example #5
Source File: ops.py    From video_prediction with MIT License
def upsample2d(inputs, strides, padding='SAME', upsample_mode='bilinear'):
    if upsample_mode == 'bilinear':
        single_bilinear_kernel = get_bilinear_kernel(strides).astype(np.float32)
        input_shape = inputs.get_shape().as_list()
        bilinear_kernel = tf.matrix_diag(tf.tile(tf.constant(single_bilinear_kernel)[..., None], (1, 1, input_shape[-1])))
        outputs = deconv2d(inputs, input_shape[-1], kernel_size=single_bilinear_kernel.shape,
                           strides=strides, kernel=bilinear_kernel, padding=padding, use_bias=False)
    elif upsample_mode == 'nearest':
        strides = list(strides) if isinstance(strides, (tuple, list)) else [strides] * 2
        input_shape = inputs.get_shape().as_list()
        inputs_tiled = tf.tile(inputs[:, :, None, :, None, :], [1, 1, strides[0], 1, strides[1], 1])
        outputs = tf.reshape(inputs_tiled, [input_shape[0], input_shape[1] * strides[0],
                                            input_shape[2] * strides[1], input_shape[3]])
    else:
        raise ValueError("Unknown upsample mode %s" % upsample_mode)
    return outputs 
Example #6
Source File: operator_pd_vdvt_update_test.py    From deep_image_model with Apache License 2.0
def _updated_mat(self, mat, v, diag):
    # Get dense matrix defined by its square root, which is an update of `mat`:
    # A = (mat + v D v^T) (mat + v D v^T)^T
    # D is the diagonal matrix with `diag` on the diagonal.

    # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
    if diag is None:
      diag_vt = tf.matrix_transpose(v)
    else:
      diag_mat = tf.matrix_diag(diag)
      diag_vt = tf.batch_matmul(diag_mat, v, adj_y=True)

    v_diag_vt = tf.batch_matmul(v, diag_vt)
    sqrt = mat + v_diag_vt
    a = tf.batch_matmul(sqrt, sqrt, adj_y=True)
    return a.eval() 
Example #7
Source File: transforms.py    From GPflowOpt with Apache License 2.0
def build_backward_variance(self, Yvar):
        """
        Additional method for scaling variance backward (used in :class:`.Normalizer`). Can process both the diagonal
        variances returned by predict_f and full covariance matrices.

        :param Yvar: size N x N x P or size N x P
        :return: Yvar scaled, same rank and size as input
        """
        rank = tf.rank(Yvar)
        # Because TensorFlow evaluates both fn1 and fn2, the transpose can't be on the same line. If a full cov
        # matrix is provided, fn1 turns it into a rank-4 tensor and then tries to transpose it as a rank-3 one.
        # Splitting it into two steps, however, works fine.
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.matrix_diag(tf.transpose(Yvar)), lambda: Yvar)
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.transpose(Yvar, perm=[1, 2, 0]), lambda: Yvar)

        N = tf.shape(Yvar)[0]
        D = tf.shape(Yvar)[2]
        L = tf.cholesky(tf.square(tf.transpose(self.A)))
        Yvar = tf.reshape(Yvar, [N * N, D])
        scaled_var = tf.reshape(tf.transpose(tf.cholesky_solve(L, tf.transpose(Yvar))), [N, N, D])
        return tf.cond(tf.equal(rank, 2), lambda: tf.reduce_sum(scaled_var, axis=1), lambda: scaled_var) 
Example #8
Source File: layers.py    From SogouMRCToolkit with Apache License 2.0
def __call__(self, query, key, value, mask=None):
        batch_size = tf.shape(query)[0]
        max_query_len = tf.shape(query)[1]
        max_key_len = tf.shape(key)[1]
        wq = tf.transpose(
            tf.reshape(self.dense_layers[0](query), [batch_size, max_query_len, self.heads, self.units // self.heads]),
            [2, 0, 1, 3])  # Head*B*QL*(U/Head)
        wk = tf.transpose(
            tf.reshape(self.dense_layers[1](key), [batch_size, max_key_len, self.heads, self.units // self.heads]),
            [2, 0, 1, 3])  # Head*B*KL*(U/Head)
        wv = tf.transpose(
            tf.reshape(self.dense_layers[2](value), [batch_size, max_key_len, self.heads, self.units // self.heads]),
            [2, 0, 1, 3])  # Head*B*KL*(U/Head)
        attention_score = tf.matmul(wq, wk, transpose_b=True) / tf.sqrt(float(self.units) / self.heads)  # Head*B*QL*KL
        if query == key and not self.attention_on_itself:
            attention_score += tf.matrix_diag(tf.zeros(max_key_len) - 100.0)
        if mask is not None:
            attention_score += tf.expand_dims(mask, 1)
        similarity = tf.nn.softmax(attention_score, -1)  # Head*B*QL*KL
        return tf.reshape(tf.transpose(tf.matmul(similarity, wv), [1, 2, 0, 3]),
                          [batch_size, max_query_len, self.units])  # B*QL*U 
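The tf.matrix_diag(tf.zeros(max_key_len) - 100.0) term above puts a large negative value on the diagonal of the score matrix and zeros everywhere else, so after the softmax each position effectively cannot attend to itself. A tiny sketch of the mask it builds (TensorFlow 1.x assumed):

import tensorflow as tf

mask = tf.matrix_diag(tf.zeros(4) - 100.0)  # -100 on the diagonal, 0 off the diagonal

with tf.Session() as sess:
    print(sess.run(mask))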
Example #9
Source File: FBSNNs.py    From FBSNNs with MIT License
def sigma_tf(self, t, X, Y): # M x 1, M x D, M x 1
        M = self.M
        D = self.D
        return tf.matrix_diag(tf.ones([M,D])) # M x D x D
    ########################################################################### 
Example #10
Source File: main.py    From multilabel-image-classification-tensorflow with MIT License
def estimate_rotation(xyz0, xyz1, pconf, noise):
  """Estimates the rotation between two sets of keypoints.

  The rotation is estimated by first subtracting the mean from each set of
  keypoints and then computing the SVD of their covariance matrix.

  Args:
    xyz0: [batch, num_kp, 3] The first set of keypoints.
    xyz1: [batch, num_kp, 3] The second set of keypoints.
    pconf: [batch, num_kp] The weights used to compute the rotation estimate.
    noise: A number indicating the noise added to the keypoints.

  Returns:
    [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
  """

  xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
  xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

  pconf2 = tf.expand_dims(pconf, 2)
  cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
  cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)

  x = xyz0 - cen0
  y = xyz1 - cen1

  cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
  _, u, v = tf.svd(cov, full_matrices=True)

  d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
  ud = tf.concat(
      [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
      axis=2)
  return tf.matmul(ud, v, transpose_b=True) 
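A hypothetical call, assuming uniform per-keypoint weights that sum to one and a small noise level (the values are made up; only the shapes follow from the docstring above):

import tensorflow as tf

batch, num_kp = 4, 10
xyz0 = tf.random_normal([batch, num_kp, 3])
xyz1 = tf.random_normal([batch, num_kp, 3])
pconf = tf.fill([batch, num_kp], 1.0 / num_kp)             # uniform keypoint weights
rot_t = estimate_rotation(xyz0, xyz1, pconf, noise=0.01)   # [batch, 3, 3], transposed rotations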
Example #11
Source File: pointnet_util.py    From articulated-part-induction with MIT License
def sample_and_group_all(xyz, points, use_xyz=True):
    '''
    Inputs:
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Outputs:
        new_xyz: (batch_size, 1, 3) as (0,0,0)
        new_points: (batch_size, 1, ndataset, 3+channel) TF tensor
        idx: (batch_size, 1, ndataset) indices of the grouped points
        grouped_xyz: (batch_size, 1, ndataset, 3) TF tensor
    Note:
        Equivalent to sample_and_group with npoint=1, radius=inf, use (0,0,0) as the centroid
    '''
    #### svd ####
    #s, u, v = tf.svd(xyz)
    #xyz = tf.matmul(u, tf.matrix_diag(s))
    #############
    batch_size = tf.shape(xyz)[0]
    nsample = xyz.get_shape()[1].value
    new_xyz = tf.tile(tf.constant(np.array([0,0,0]).reshape((1,1,3)),dtype=tf.float32),(batch_size,1,1)) # (batch_size, 1, 3)
    idx = tf.tile(tf.constant(np.array(range(nsample)).reshape((1,1,nsample))),(batch_size,1,1))
    grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3)) # (batch_size, npoint=1, nsample, 3)
    if points is not None:
        if use_xyz:
            new_points = tf.concat([xyz, points], axis=2) # (batch_size, 16, 259)
        else:
            new_points = points
        new_points = tf.expand_dims(new_points, 1) # (batch_size, 1, 16, 259)
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz 
Example #12
Source File: memory.py    From dynamic-kanerva-machines with Apache License 2.0
def _get_prior_params(self):
    log_var = snt.TrainableVariable(
        [], name='prior_var_scale',
        initializers={'w': tf.constant_initializer(
            np.log(1.0))})()
    self._prior_var = tf.ones([self._memory_size]) * tf.exp(log_var) + EPSILON
    prior_cov = tf.matrix_diag(self._prior_var)
    prior_mean = snt.TrainableVariable(
        [self._memory_size, self._code_size],
        name='prior_mean',
        initializers={'w': tf.truncated_normal_initializer(
            mean=0.0, stddev=1.0)})()
    return prior_mean, prior_cov 
Example #13
Source File: main.py    From models with Apache License 2.0
def estimate_rotation(xyz0, xyz1, pconf, noise):
  """Estimates the rotation between two sets of keypoints.

  The rotation is estimated by first subtracting the mean from each set of
  keypoints and then computing the SVD of their covariance matrix.

  Args:
    xyz0: [batch, num_kp, 3] The first set of keypoints.
    xyz1: [batch, num_kp, 3] The second set of keypoints.
    pconf: [batch, num_kp] The weights used to compute the rotation estimate.
    noise: A number indicating the noise added to the keypoints.

  Returns:
    [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
  """

  xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
  xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

  pconf2 = tf.expand_dims(pconf, 2)
  cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
  cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)

  x = xyz0 - cen0
  y = xyz1 - cen1

  cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
  _, u, v = tf.svd(cov, full_matrices=True)

  d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
  ud = tf.concat(
      [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
      axis=2)
  return tf.matmul(ud, v, transpose_b=True) 
Example #14
Source File: main.py    From g-tensorflow-models with Apache License 2.0
def estimate_rotation(xyz0, xyz1, pconf, noise):
  """Estimates the rotation between two sets of keypoints.

  The rotation is estimated by first subtracting the mean from each set of
  keypoints and then computing the SVD of their covariance matrix.

  Args:
    xyz0: [batch, num_kp, 3] The first set of keypoints.
    xyz1: [batch, num_kp, 3] The second set of keypoints.
    pconf: [batch, num_kp] The weights used to compute the rotation estimate.
    noise: A number indicating the noise added to the keypoints.

  Returns:
    [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
  """

  xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
  xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)

  pconf2 = tf.expand_dims(pconf, 2)
  cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
  cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)

  x = xyz0 - cen0
  y = xyz1 - cen1

  cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
  _, u, v = tf.svd(cov, full_matrices=True)

  d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
  ud = tf.concat(
      [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
      axis=2)
  return tf.matmul(ud, v, transpose_b=True) 
Example #15
Source File: SVP.py    From KD_methods_with_TF with MIT License
def gradient_svd(op, ds, dU, dV):
    s, U, V = op.outputs

    u_sz = tf.squeeze(tf.slice(tf.shape(dU),[1],[1]))
    v_sz = tf.squeeze(tf.slice(tf.shape(dV),[1],[1]))
    s_sz = tf.squeeze(tf.slice(tf.shape(ds),[1],[1]))

    S = tf.matrix_diag(s)
    s_2 = tf.square(s)

    eye = tf.expand_dims(tf.eye(s_sz),0) 
    k = (1 - eye)/(tf.expand_dims(s_2,2)-tf.expand_dims(s_2,1) + eye)
    KT = tf.matrix_transpose(k)
    KT = removenan(KT)
    
    def msym(X):
        return (X+tf.matrix_transpose(X))
    
    def left_grad(U,S,V,dU,dV):
        U, V = (V, U); dU, dV = (dV, dU)
        D = tf.matmul(dU,tf.matrix_diag(1/(s+1e-8)))
    
        grad = tf.matmul(D - tf.matmul(U, tf.matrix_diag(tf.matrix_diag_part(tf.matmul(U,D,transpose_a=True)))
                           + 2*tf.matmul(S, msym(KT*(tf.matmul(D,tf.matmul(U,S),transpose_a=True))))), V,transpose_b=True)
        
        grad = tf.matrix_transpose(grad)
        return grad

    def right_grad(U,S,V,dU,dV):
        grad = tf.matmul(2*tf.matmul(U, tf.matmul(S, msym(KT*(tf.matmul(V,dV,transpose_a=True)))) ),V,transpose_b=True)
        return grad
    
    grad = tf.cond(tf.greater(v_sz, u_sz), lambda :  left_grad(U,S,V,dU,dV), 
                                           lambda : right_grad(U,S,V,dU,dV))
    
    return [grad] 
Example #16
Source File: nn.py    From pixelsnail-public with MIT License
def non_cached_get_causal_mask(canvas_size, causal_unit):
    assert causal_unit == 1
    ones = tf.ones([canvas_size, canvas_size], dtype=tf.float32)
    lt = tf.matrix_band_part(ones, -1, 0) - tf.matrix_diag(tf.ones([canvas_size,], dtype=tf.float32))
    return lt[None, ...] 
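For a canvas of size 4, the mask above is strictly lower triangular: ones below the diagonal, zeros on and above it, so each position can only see earlier positions. A quick check (TensorFlow 1.x assumed):

import tensorflow as tf

mask = non_cached_get_causal_mask(canvas_size=4, causal_unit=1)

with tf.Session() as sess:
    print(sess.run(mask[0]))
# [[0. 0. 0. 0.]
#  [1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]]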
Example #17
Source File: svp.py    From BERT with Apache License 2.0
def gradient_svd(op, ds, dU, dV):
	s, U, V = op.outputs

	u_sz = tf.squeeze(tf.slice(tf.shape(dU),[1],[1]))
	v_sz = tf.squeeze(tf.slice(tf.shape(dV),[1],[1]))
	s_sz = tf.squeeze(tf.slice(tf.shape(ds),[1],[1]))

	S = tf.matrix_diag(s)
	s_2 = tf.square(s)

	eye = tf.expand_dims(tf.eye(s_sz),0) 
	k = (1 - eye)/(tf.expand_dims(s_2,2)-tf.expand_dims(s_2,1) + eye)
	KT = tf.matrix_transpose(k)
	KT = removenan(KT)
	
	def msym(X):
		return (X+tf.matrix_transpose(X))
	
	def left_grad(U,S,V,dU,dV):
		U, V = (V, U); dU, dV = (dV, dU)
		D = tf.matmul(dU,tf.matrix_diag(1/(s+1e-8)))
		US = tf.matmul(U,S)
	
		grad = tf.matmul(D, V, transpose_b=True)\
			  +tf.matmul(tf.matmul(U,tf.matrix_diag(tf.matrix_diag_part(-tf.matmul(U,D,transpose_a=True)))), V, transpose_b=True)\
			  +tf.matmul(2*tf.matmul(US, msym(KT*(tf.matmul(V,-tf.matmul(V,tf.matmul(D,US,transpose_a=True)),transpose_a=True)))),V,transpose_b=True)
		grad = tf.matrix_transpose(grad)
		return grad

	def right_grad(U,S,V,dU,dV):
		US = tf.matmul(U,S)
		grad = tf.matmul(2*tf.matmul(US, msym(KT*(tf.matmul(V,dV,transpose_a=True))) ),V,transpose_b=True)
		return grad
	
	grad = tf.cond(tf.greater(v_sz, u_sz), lambda : left_grad(U,S,V,dU,dV), 
										   lambda : right_grad(U,S,V,dU,dV))
	
	return [grad] 
Example #18
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      tf.matrix_diag(0) 
Example #19
Source File: BlackScholesBarenblatt100D.py    From FBSNNs with MIT License
def sigma_tf(self, t, X, Y): # M x 1, M x D, M x 1
        return 0.4*tf.matrix_diag(X) # M x D x D
    
    ########################################################################### 
Example #20
Source File: tensorflow.py    From deepx with MIT License
def matrix_diag(self, a):
        return tf.matrix_diag(a) 
Example #21
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      v_diag = tf.matrix_diag(v)
      self.assertEqual((3, 3), v_diag.get_shape())
      self.assertAllEqual(v_diag.eval(), mat) 
Example #22
Source File: mvn_test.py    From deep_image_model with Apache License 2.0
def testMultivariateNormalDiagWithSoftplusStDev(self):
    mu = [-1.0, 1.0]
    diag = [-1.0, -2.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiagWithSoftplusStDev(mu, diag)
      samps = dist.sample(1000, seed=0).eval()
      cov_mat = tf.matrix_diag(tf.nn.softplus(diag)).eval()**2

      self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
      self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1) 
Example #23
Source File: mvn_test.py    From deep_image_model with Apache License 2.0
def testSample(self):
    mu = [-1.0, 1.0]
    diag = [1.0, 2.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiag(mu, diag)
      samps = dist.sample(1000, seed=0).eval()
      cov_mat = tf.matrix_diag(diag).eval()**2

      self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
      self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1) 
Example #24
Source File: operator_pd_diag_test.py    From deep_image_model with Apache License 2.0
def _diag_to_matrix(self, diag):
    return tf.matrix_diag(diag**2).eval() 
Example #25
Source File: operator_pd_diag_test.py    From deep_image_model with Apache License 2.0
def _diag_to_matrix(self, diag):
    return tf.matrix_diag(diag).eval() 
Example #26
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testBatchVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v_batch = np.array([[1.0, 2.0, 3.0],
                          [4.0, 5.0, 6.0]])
      mat_batch = np.array(
          [[[1.0, 0.0, 0.0],
            [0.0, 2.0, 0.0],
            [0.0, 0.0, 3.0]],
           [[4.0, 0.0, 0.0],
            [0.0, 5.0, 0.0],
            [0.0, 0.0, 6.0]]])
      v_batch_diag = tf.matrix_diag(v_batch)
      self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
      self.assertAllEqual(v_batch_diag.eval(), mat_batch) 
Example #27
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testGrad(self):
    shapes = ((3,), (7, 4))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), np.float32)
        y = tf.matrix_diag(x)
        error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                               y, y.get_shape().as_list())
        self.assertLess(error, 1e-4) 
Example #28
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testInvalidShapeAtEval(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      with self.assertRaisesOpError("input must be at least 1-dim"):
        tf.matrix_diag(v).eval(feed_dict={v: 0.0})