Python tensorflow.matrix_set_diag() Examples

The following are 30 code examples of tensorflow.matrix_set_diag(), collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license. tf.matrix_set_diag() is the TensorFlow 1.x name for this op; in TensorFlow 2.x the same operation is available as tf.linalg.set_diag().
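As a quick orientation before the project examples, here is a minimal, self-contained sketch (assuming TensorFlow 1.x, which all of the examples below target) of the op's core behaviour: the main diagonal of the innermost two dimensions is replaced by the given vector, while every off-diagonal entry is left untouched.

import numpy as np
import tensorflow as tf

mat = tf.constant(np.ones((3, 3)), dtype=tf.float32)   # all-ones 3x3 matrix
new_diag = tf.constant([1.0, 2.0, 3.0])                # replacement for the main diagonal
result = tf.matrix_set_diag(mat, new_diag)             # off-diagonal entries stay 1.0

with tf.Session() as sess:
    print(sess.run(result))
    # [[1. 1. 1.]
    #  [1. 2. 1.]
    #  [1. 1. 3.]]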
Example #1
Source File: memory.py    From dynamic-kanerva-machines with Apache License 2.0
def _update_memory(self, old_memory, w_samples, new_z_mean, new_z_var):
    """Setting new_z_var=0 for sample based update."""
    old_mean, old_cov = old_memory
    wR = self.get_w_to_z_mean(w_samples, old_memory.M_mean)
    wU, wUw = self._read_cov(w_samples, old_memory)
    sigma_z = wUw + new_z_var + self._obs_noise_stddev**2  # [S, B]
    delta = new_z_mean - wR  # [S, B, C]
    c_z = wU / tf.expand_dims(sigma_z, -1)  # [S, B, M]
    posterior_mean = old_mean + tf.einsum('sbm,sbc->bmc', c_z, delta)
    posterior_cov = old_cov - tf.einsum('sbm,sbn->bmn', c_z, wU)
    # Clip diagonal elements for numerical stability
    posterior_cov = tf.matrix_set_diag(
        posterior_cov,
        tf.clip_by_value(tf.matrix_diag_part(posterior_cov), EPSILON, 1e10))
    new_memory = MemoryState(M_mean=posterior_mean, M_cov=posterior_cov)
    return new_memory 
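The tf.matrix_set_diag call above is a common stabilization trick: bound the diagonal of a covariance matrix without touching its off-diagonal entries. A minimal sketch of just that step, with an illustrative EPSILON and batch shape:

import tensorflow as tf

EPSILON = 1e-6  # illustrative lower bound
cov = tf.placeholder(tf.float32, [None, 8, 8])  # batch of covariance matrices
bounded_diag = tf.clip_by_value(tf.matrix_diag_part(cov), EPSILON, 1e10)
cov_stable = tf.matrix_set_diag(cov, bounded_diag)  # same off-diagonals, clipped diagonal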
Example #2
Source File: core.py    From parametric_tsne with MIT License
def _get_normed_sym_tf(X_, batch_size):
    """
    Compute the normalized and symmetrized probability matrix from
    relative probabilities X_, where X_ is a Tensorflow Tensor
    Parameters
    ----------
    X_ : 2-d Tensor (N, N)
        asymmetric probabilities. For instance, X_(i, j) = P(i|j)
    Returns
    -------
    P : 2-d Tensor (N, N)
        symmetric probabilities, making the assumption that P(i|j) = P(j|i)
        Diagonals are all 0s."""
    toset = tf.constant(0, shape=[batch_size], dtype=X_.dtype)
    X_ = tf.matrix_set_diag(X_, toset)
    norm_facs = tf.reduce_sum(X_, axis=0, keep_dims=True)
    X_ = X_ / norm_facs
    X_ = 0.5*(X_ + tf.transpose(X_))
    
    return X_ 
Example #3
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testRectangularBatch(self):
    with self.test_session(use_gpu=self._use_gpu):
      v_batch = np.array([[-1.0, -2.0],
                          [-4.0, -5.0]])
      mat_batch = np.array(
          [[[1.0, 0.0, 3.0],
            [0.0, 2.0, 0.0]],
           [[4.0, 0.0, 4.0],
            [0.0, 5.0, 0.0]]])

      mat_set_diag_batch = np.array(
          [[[-1.0, 0.0, 3.0],
            [0.0, -2.0, 0.0]],
           [[-4.0, 0.0, 4.0],
            [0.0, -5.0, 0.0]]])
      output = tf.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 2, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, output.eval()) 
Example #4
Source File: gaussian_process.py    From BERT with Apache License 2.0
def fit(self, x=None, y=None):
    # p(coeffs | x, y) = Normal(coeffs |
    #   mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
    #   covariance = (1/noise_variance x^T x + I)^{-1})
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return History object.
    return 
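Here tf.matrix_set_diag adds 1 to every diagonal entry, i.e. it forms K + I without materializing an identity matrix, which makes the precision matrix positive definite and the Cholesky factorization well posed. A stripped-down sketch of that step (shapes are illustrative):

import tensorflow as tf

K = tf.placeholder(tf.float32, [None, None])                     # x^T x / noise_variance
K_plus_I = tf.matrix_set_diag(K, tf.matrix_diag_part(K) + 1.0)   # equivalent to K + tf.eye(n)
chol = tf.linalg.cholesky(K_plus_I)                              # well defined since K is PSD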
Example #5
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testSquareBatch(self):
    with self.test_session(use_gpu=self._use_gpu):
      v_batch = np.array([[-1.0, -2.0, -3.0],
                          [-4.0, -5.0, -6.0]])
      mat_batch = np.array(
          [[[1.0, 0.0, 3.0],
            [0.0, 2.0, 0.0],
            [1.0, 0.0, 3.0]],
           [[4.0, 0.0, 4.0],
            [0.0, 5.0, 0.0],
            [2.0, 0.0, 6.0]]])

      mat_set_diag_batch = np.array(
          [[[-1.0, 0.0, 3.0],
            [0.0, -2.0, 0.0],
            [1.0, 0.0, -3.0]],
           [[-4.0, 0.0, 4.0],
            [0.0, -5.0, 0.0],
            [2.0, 0.0, -6.0]]])
      output = tf.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, output.eval()) 
Example #6
Source File: PredX_MPNN.py    From dl4chem-geometry with BSD 3-Clause "New" or "Revised" License
def _pos_to_proximity(self, pos, reuse=True): #[batch_size, n_max, 3]

        with tf.variable_scope('pos_to_proximity', reuse=reuse):

            pos_1 = tf.expand_dims(pos, axis = 2)
            pos_2 = tf.expand_dims(pos, axis = 1)

            pos_sub = tf.subtract(pos_1, pos_2)
            proximity = tf.square(pos_sub)
            proximity = tf.reduce_sum(proximity, 3)
            proximity = tf.sqrt(proximity + 1e-5)

            proximity = tf.reshape(proximity, [self.batch_size, self.n_max, self.n_max])
            proximity = tf.multiply(proximity, self.mask)
            proximity = tf.multiply(proximity, tf.transpose(self.mask, perm = [0, 2, 1]))

            proximity = tf.matrix_set_diag(proximity, [[0] * self.n_max] * self.batch_size)

        return proximity 
Example #7
Source File: bayes.py    From training_results_v0.5 with Apache License 2.0
def fit(self, x=None, y=None):
    # p(coeffs | x, y) = Normal(coeffs |
    #   mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
    #   covariance = (1/noise_variance x^T x + I)^{-1})
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return History object.
    return 
Example #8
Source File: digraph_ops.py    From object_detection_with_tensorflow with MIT License
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
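After the shape checks, the whole combination is a single tf.matrix_set_diag call: the root potentials overwrite the diagonal, so P[b, t, t] = roots[b, t] while every s != t entry keeps arcs[b, s, t]. A tiny numeric illustration (values are arbitrary):

import numpy as np
import tensorflow as tf

arcs = tf.constant(np.arange(8, dtype=np.float32).reshape(2, 2, 2))  # [B=2, N=2, N=2]
roots = tf.constant([[100.0, 101.0], [102.0, 103.0]])                # [B=2, N=2]
combined = tf.matrix_set_diag(arcs, roots)
# combined[0] == [[100., 1.], [2., 101.]], combined[1] == [[102., 5.], [6., 103.]]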
Example #9
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testGradWithNoShapeInformation(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      v = tf.placeholder(dtype=tf.float32)
      mat = tf.placeholder(dtype=tf.float32)
      grad_input = tf.placeholder(dtype=tf.float32)
      output = tf.matrix_set_diag(mat, v)
      grads = tf.gradients(output, [mat, v], grad_ys=grad_input)
      grad_input_val = np.random.rand(3, 3).astype(np.float32)
      grad_vals = sess.run(
          grads, feed_dict={v: 2 * np.ones(3), mat: np.ones((3, 3)),
                            grad_input: grad_input_val})
      self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
      self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
                          grad_vals[0]) 
Example #10
Source File: tf_ops.py    From safekit with MIT License
def full_mvn_loss(truth, h):
    """
    Takes the output of a neural network after it's last activation, performs an affine transform.
    It returns the mahalonobis distances between the targets and the result of the affine transformation, according
    to a parametrized Normal distribution. The log of the determinant of the parametrized
    covariance matrix is meant to be minimized to avoid a trivial optimization.

    :param truth: Actual datapoints to compare against learned distribution
    :param h: output of neural network (after last non-linear transform)
    :return: (tf.Tensor[MB X D], tf.Tensor[MB X 1]) Loss matrix, log_of_determinants of covariance matrices.
    """
    fan_in = h.get_shape().as_list()[1]
    dimension = truth.get_shape().as_list()[1]
    U = 100*tf.Variable(tf.truncated_normal([fan_in, dimension + dimension**2],
                                                                 dtype=tf.float32,
                                                                 name='U'))
    b = tf.Variable(tf.zeros([dimension + dimension**2]))
    y = tf.matmul(h, U) + b
    mu = tf.slice(y, [0, 0], [-1, dimension])  # is MB x dimension
    var = tf.slice(y, [0, dimension], [-1, -1])*0.0001  # is MB x dimension^2 # WARNING WARNING TODO FIX THIS MAGIC NUMBER
    var = tf.reshape(var, [-1, dimension, dimension])  # make it a MB x D x D tensor (var is a superset of the lower triangular part of a Cholesky decomp)
    var_diag = tf.exp(tf.matrix_diag_part(var)) + 1 # WARNING: FIX THIS MAGIC NUMBER
    var = tf.matrix_set_diag(var,var_diag)
    var = tf.matrix_band_part(var, -1, 0)
    z = tf.squeeze(tf.matrix_triangular_solve(var, tf.reshape(truth - mu, [-1, dimension, 1]), lower=True, adjoint=False))  # z should be MB x D
    inner_prods = tf.reduce_sum(tf.square(z), 1)  # take row-wise inner products of z, leaving MB x 1 vector
    logdet = tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(var))), 1) # diag_part converts MB x D x D to MB x D, square and log preserve, then sum makes MB x 1
    loss_column = inner_prods  # is MB x 1 ... hard to track individual features' contributions due to correlations
    tf.add_to_collection('full', var_diag)
    tf.add_to_collection('full', var)
    return tf.reshape(loss_column, [-1, 1]), tf.reshape(logdet, [-1, 1]) 
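The matrix_set_diag / matrix_band_part pair above is the usual way to turn an unconstrained square output into a valid Cholesky factor: make the diagonal strictly positive, write it back, then keep only the lower triangle. Isolated, and with illustrative shapes, the parameterization looks like this:

import tensorflow as tf

raw = tf.placeholder(tf.float32, [None, 4, 4])   # unconstrained network output, one matrix per sample
pos_diag = tf.exp(tf.matrix_diag_part(raw))      # strictly positive diagonal
chol = tf.matrix_band_part(tf.matrix_set_diag(raw, pos_diag), -1, 0)  # lower-triangular factor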
Example #11
Source File: digraph_ops.py    From hands-detection with MIT License
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #12
Source File: digraph_ops.py    From object_detection_kitti with Apache License 2.0
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #13
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
      tf.matrix_set_diag(0, [0])
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      tf.matrix_set_diag([[0]], 0) 
Example #14
Source File: modelv2.py    From Cross-Lingual-NBT with GNU General Public License v3.0
def decode(self, h_utterance_representation, gate):
        h = tf.multiply(gate, tf.expand_dims(h_utterance_representation, 1))
        h = tf.reshape(h, [-1, self.vector_dimension])
        h = tf.nn.dropout(h, self.keep_prob)
        with tf.variable_scope("output_model", reuse=tf.AUTO_REUSE):
            w2_softmax = tf.get_variable(name="W2-projection", initializer=tf.random_normal([self.vector_dimension, 1]))
            b2_softmax = tf.get_variable(name="b2-projection", initializer=tf.zeros([1]))
            if self.use_softmax:
                #h = tf.sigmoid(tf.matmul(h, w1_softmax) + b1_softmax)
                y_presoftmax = tf.matmul(h, w2_softmax) + b2_softmax
                y_presoftmax = tf.reshape(y_presoftmax, [-1, self.label_count])
                append_zeros_none = tf.zeros([tf.shape(y_presoftmax)[0], 1])
                y_presoftmax = tf.concat([y_presoftmax, append_zeros_none], 1)
            else:
                y_presoftmax = tf.matmul(h, w2_softmax) + b2_softmax
                y_presoftmax = tf.reshape(y_presoftmax, [-1, self.label_count])

        with tf.variable_scope('distribution_output', reuse=tf.AUTO_REUSE):
            if self.use_softmax:
                a_memory = tf.get_variable(name="a-memory", initializer=tf.random_normal([1, 1]))
                diag_memory = a_memory * tf.diag(tf.ones(self.label_size))
                b_memory = tf.get_variable(name="b-memory", initializer=tf.random_normal([1, 1]))
                non_diag_memory = tf.matrix_set_diag(b_memory * tf.ones([self.label_size, self.label_size]), tf.zeros(self.label_size))
                W_memory = diag_memory + non_diag_memory
                a_current = tf.get_variable(name="a-current", initializer=tf.random_normal([1, 1]))
                diag_current = a_current * tf.diag(tf.ones(self.label_size))
                b_current = tf.get_variable(name="b-current", initializer=tf.random_normal([1, 1]))
                non_diag_current = tf.matrix_set_diag(b_current * tf.ones([self.label_size, self.label_size]), tf.zeros(self.label_size))
                W_current = diag_current + non_diag_current
                y_combine = tf.matmul(self.y_past_state, W_memory) + tf.matmul(y_presoftmax, W_current)
                y = tf.nn.softmax(y_combine)  # + y_ss_update_contrib)
                cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=y_combine, labels=self.y_))
            else:
                y = tf.nn.sigmoid(y_presoftmax)  # for requestables, we just have turn-level binary decisions
                cross_entropy = tf.reduce_sum(tf.square(y - self.y_))
        return y, cross_entropy 
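The two matrix_set_diag calls above build matrices that have one learned scalar on the diagonal and a different learned scalar everywhere else, i.e. W = a * I + b * (1 - I). A compact sketch of that construction (label_size and the variable names are illustrative):

import tensorflow as tf

label_size = 5  # illustrative
a = tf.get_variable(name="a", initializer=tf.random_normal([1, 1]))
b = tf.get_variable(name="b", initializer=tf.random_normal([1, 1]))
diag_part = a * tf.diag(tf.ones(label_size))           # a on the diagonal, 0 elsewhere
off_diag = tf.matrix_set_diag(b * tf.ones([label_size, label_size]),
                              tf.zeros(label_size))    # b off the diagonal, 0 on it
W = diag_part + off_diag                               # a * I + b * (1 - I)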
Example #15
Source File: digraph_ops.py    From HumanRecognition with MIT License
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #16
Source File: digraph_ops.py    From g-tensorflow-models with Apache License 2.0
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #17
Source File: digraph_ops.py    From multilabel-image-classification-tensorflow with MIT License
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #18
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testGrad(self):
    shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), dtype=tf.float32)
        diag_shape = shape[:-2] + (min(shape[-2:]),)
        x_diag = tf.constant(np.random.rand(*diag_shape), dtype=tf.float32)
        y = tf.matrix_set_diag(x, x_diag)
        error_x = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
        self.assertLess(error_x, 1e-4)
        error_x_diag = tf.test.compute_gradient_error(
            x_diag, x_diag.get_shape().as_list(),
            y, y.get_shape().as_list())
        self.assertLess(error_x_diag, 1e-4) 
Example #19
Source File: digraph_ops.py    From DOTA_models with Apache License 2.0
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #20
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testRectangular(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([3.0, 4.0])
      mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
      expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
      output = tf.matrix_set_diag(mat, v)
      self.assertEqual((2, 3), output.get_shape())
      self.assertAllEqual(expected, output.eval())

      v = np.array([3.0, 4.0])
      mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
      expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
      output = tf.matrix_set_diag(mat, v)
      self.assertEqual((3, 2), output.get_shape())
      self.assertAllEqual(expected, output.eval()) 
Example #21
Source File: diag_op_test.py    From deep_image_model with Apache License 2.0
def testSquare(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.array([[0.0, 1.0, 0.0],
                      [1.0, 0.0, 1.0],
                      [1.0, 1.0, 1.0]])
      mat_set_diag = np.array([[1.0, 1.0, 0.0],
                               [1.0, 2.0, 1.0],
                               [1.0, 1.0, 3.0]])
      output = tf.matrix_set_diag(mat, v)
      self.assertEqual((3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag, output.eval()) 
Example #22
Source File: digraph_ops.py    From Gun-Detector with Apache License 2.0
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #23
Source File: digraph_ops.py    From yolo_v2 with Apache License 2.0
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # All arguments must have statically-known rank.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # All arguments must share the same type.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size = roots_shape[0]
  num_tokens = roots_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots) 
Example #24
Source File: fit.py    From parasol with MIT License
def quadratic_regression_pd(SA, costs, diag_cost=False):
    assert not diag_cost
    global global_step
    dsa = SA.shape[-1]
    C = tf.get_variable('cost_mat{}'.format(global_step), shape=[dsa, dsa],
                        dtype=tf.float32,
                        initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1))
    L = tf.matrix_band_part(C, -1, 0)
    L = tf.matrix_set_diag(L, tf.maximum(tf.matrix_diag_part(L), 0.0))
    LL = tf.matmul(L, tf.transpose(L))
    c = tf.get_variable('cost_vec{}'.format(global_step), shape=[dsa],
                        dtype=tf.float32, initializer=tf.zeros_initializer())
    b = tf.get_variable('cost_bias{}'.format(global_step), shape=[],
                        dtype=tf.float32, initializer=tf.zeros_initializer())
    s_ = tf.placeholder(tf.float32, [None, dsa])
    c_ = tf.placeholder(tf.float32, [None])
    pred_cost = 0.5 * tf.einsum('na,ab,nb->n', s_, LL, s_) + \
            tf.einsum('na,a->n', s_, c) + b
    mse = tf.reduce_mean(tf.square(pred_cost - c_))
    opt = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(mse)
    N = SA.shape[0]
    SA = SA.reshape([-1, dsa])
    costs = costs.reshape([-1])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for itr in tqdm.trange(1000, desc='Fitting cost'):
            _, m = sess.run([opt, mse], feed_dict={
                s_: SA,
                c_: costs,
            })
            if itr == 0 or itr == 999:
                print('mse itr {}: {}'.format(itr, m))
        cost_mat, cost_vec = sess.run((LL, c))

    global_step += 1
    return cost_mat, cost_vec 
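The matrix_set_diag line above clamps the diagonal of the lower-triangular factor L to be non-negative before forming LL = L L^T, which keeps the fitted quadratic cost matrix positive semidefinite throughout training. In isolation the pattern is (dsa is illustrative):

import tensorflow as tf

dsa = 6  # illustrative state-action dimension
C = tf.get_variable('C', shape=[dsa, dsa], dtype=tf.float32)
L = tf.matrix_band_part(C, -1, 0)                                    # lower triangle of C
L = tf.matrix_set_diag(L, tf.maximum(tf.matrix_diag_part(L), 0.0))   # non-negative diagonal
LL = tf.matmul(L, L, transpose_b=True)                               # positive semidefinite by construction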
Example #25
Source File: bayes.py    From BERT with Apache License 2.0
def get_moments(x):
  """Gets first and second moments of input."""
  if isinstance(x, ed.RandomVariable):
    mean = x.distribution.mean()
    variance = x.distribution.variance()
    try:
      covariance = x.distribution.covariance()
    except NotImplementedError:
      covariance = tf.zeros(x.shape.concatenate(x.shape[-1]), dtype=x.dtype)
      covariance = tf.matrix_set_diag(covariance, variance)
  else:
    mean = x
    variance = tf.zeros_like(x)
    covariance = tf.zeros(x.shape.concatenate(x.shape[-1]), dtype=x.dtype)
  return mean, variance, covariance 
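Because the covariance tensor starts out as zeros in the branch above, the matrix_set_diag call simply builds a diagonal covariance from the variance vector; tf.matrix_diag(variance) would give the same result in that zero-initialized case. A one-step sketch:

import tensorflow as tf

variance = tf.placeholder(tf.float32, [None, 3])   # per-dimension variances
covariance = tf.matrix_set_diag(tf.zeros([tf.shape(variance)[0], 3, 3]), variance)
# identical here to: covariance = tf.matrix_diag(variance)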
Example #26
Source File: svgp.py    From zhusuan with MIT License
def build_variational(hps, kernel, z_pos, x, n_particles):
    bn = zs.BayesianNet()
    z_mean = tf.get_variable(
        'z/mean', [hps.n_z], hps.dtype, tf.zeros_initializer())
    z_cov_raw = tf.get_variable(
        'z/cov_raw', initializer=tf.eye(hps.n_z, dtype=hps.dtype))
    z_cov_tril = tf.matrix_set_diag(
        tf.matrix_band_part(z_cov_raw, -1, 0),
        tf.nn.softplus(tf.matrix_diag_part(z_cov_raw)))
    fz = bn.multivariate_normal_cholesky(
        'fz', z_mean, z_cov_tril, n_samples=n_particles)
    bn.stochastic('fx', gp_conditional(z_pos, fz, x, False, kernel))
    return bn 
Example #27
Source File: addressing.py    From dnc with Apache License 2.0
def _link(self, prev_link, prev_precedence_weights, write_weights):
    """Calculates the new link graphs.

    For each write head, the link is a directed graph (represented by a matrix
    with entries in range [0, 1]) whose vertices are the memory locations, and
    an edge indicates temporal ordering of writes.

    Args:
      prev_link: A tensor of shape `[batch_size, num_writes, memory_size,
          memory_size]` representing the previous link graphs for each write
          head.
      prev_precedence_weights: A tensor of shape `[batch_size, num_writes,
          memory_size]` which is the previous "aggregated" write weights for
          each write head.
      write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
          containing the new locations in memory written to.

    Returns:
      A tensor of shape `[batch_size, num_writes, memory_size, memory_size]`
      containing the new link graphs for each write head.
    """
    with tf.name_scope('link'):
      batch_size = tf.shape(prev_link)[0]
      write_weights_i = tf.expand_dims(write_weights, 3)
      write_weights_j = tf.expand_dims(write_weights, 2)
      prev_precedence_weights_j = tf.expand_dims(prev_precedence_weights, 2)
      prev_link_scale = 1 - write_weights_i - write_weights_j
      new_link = write_weights_i * prev_precedence_weights_j
      link = prev_link_scale * prev_link + new_link
      # Return the link with the diagonal set to zero, to remove self-looping
      # edges.
      return tf.matrix_set_diag(
          link,
          tf.zeros(
              [batch_size, self._num_writes, self._memory_size],
              dtype=link.dtype)) 
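The closing matrix_set_diag writes zeros onto the diagonal of every [memory_size, memory_size] link matrix, removing self-looping edges. When spelling out the zero tensor's shape is inconvenient, the same effect can be achieved by zeroing the existing diagonal, as in this sketch:

import tensorflow as tf

link = tf.placeholder(tf.float32, [None, 2, 16, 16])   # [batch, num_writes, memory, memory]
link_no_self_loops = tf.matrix_set_diag(link, tf.zeros_like(tf.matrix_diag_part(link)))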
Example #28
Source File: pot.py    From adagan with BSD 3-Clause "New" or "Revised" License
def correlation_loss(self, opts, input_):
        """
        Independence test based on Pearson's correlation.
        Keep in mind that this captures only linear dependancies.
        However, for multivariate Gaussian independence is equivalent
        to zero correlation.
        """

        batch_size = self.get_batch_size(opts, input_)
        dim = int(input_.get_shape()[1])
        transposed = tf.transpose(input_, perm=[1, 0])
        mean = tf.reshape(tf.reduce_mean(transposed, axis=1), [-1, 1])
        centered_transposed = transposed - mean # Broadcasting mean
        cov = tf.matmul(centered_transposed, centered_transposed, transpose_b=True)
        cov = cov / (batch_size - 1)
        #cov = tf.Print(cov, [cov], "cov")
        sigmas = tf.sqrt(tf.diag_part(cov) + 1e-5)
        #sigmas = tf.Print(sigmas, [sigmas], "sigmas")
        sigmas = tf.reshape(sigmas, [1, -1])
        sigmas = tf.matmul(sigmas, sigmas, transpose_a=True)
        #sigmas = tf.Print(sigmas, [sigmas], "sigmas")
        # Pearson's correlation
        corr = cov / sigmas
        triangle = tf.matrix_set_diag(tf.matrix_band_part(corr, 0, -1), tf.zeros(dim))
        #triangle = tf.Print(triangle, [triangle], "triangle")
        loss = tf.reduce_sum(tf.square(triangle)) / ((dim * dim - dim) / 2.0)
        #loss = tf.Print(loss, [loss], "Correlation loss")
        return loss 
Example #29
Source File: gaussian_process.py    From BERT with Apache License 2.0
def call(self, inputs):
    if self.conditional_inputs is None and self.conditional_outputs is None:
      covariance_matrix = self.covariance_fn(inputs, inputs)
      # Tile locations so output has shape [units, batch_size]. Covariance will
      # broadcast to [units, batch_size, batch_size], and we perform
      # shape manipulations to get a random variable over [batch_size, units].
      loc = self.mean_fn(inputs)
      loc = tf.tile(loc[tf.newaxis], [self.units] + [1] * len(loc.shape))
    else:
      knn = self.covariance_fn(inputs, inputs)
      knm = self.covariance_fn(inputs, self.conditional_inputs)
      kmm = self.covariance_fn(self.conditional_inputs, self.conditional_inputs)
      kmm = tf.matrix_set_diag(
          kmm, tf.matrix_diag_part(kmm) + tf.keras.backend.epsilon())
      kmm_tril = tf.linalg.cholesky(kmm)
      kmm_tril_operator = tf.linalg.LinearOperatorLowerTriangular(kmm_tril)
      knm_operator = tf.linalg.LinearOperatorFullMatrix(knm)

      # TODO(trandustin): Vectorize linear algebra for multiple outputs. For
      # now, we do each separately and stack to obtain a locations Tensor of
      # shape [units, batch_size].
      loc = []
      for conditional_outputs_unit in tf.unstack(self.conditional_outputs,
                                                 axis=-1):
        center = conditional_outputs_unit - self.mean_fn(
            self.conditional_inputs)
        loc_unit = knm_operator.matvec(
            kmm_tril_operator.solvevec(kmm_tril_operator.solvevec(center),
                                       adjoint=True))
        loc.append(loc_unit)
      loc = tf.stack(loc) + self.mean_fn(inputs)[tf.newaxis]

      covariance_matrix = knn
      covariance_matrix -= knm_operator.matmul(
          kmm_tril_operator.solve(
              kmm_tril_operator.solve(knm, adjoint_arg=True), adjoint=True))

    covariance_matrix = tf.matrix_set_diag(
        covariance_matrix,
        tf.matrix_diag_part(covariance_matrix) + tf.keras.backend.epsilon())

    # Form a multivariate normal random variable with batch_shape units and
    # event_shape batch_size. Then make it be independent across the units
    # dimension. Then transpose its dimensions so it is [batch_size, units].
    random_variable = ed.MultivariateNormalFullCovariance(
        loc=loc, covariance_matrix=covariance_matrix)
    random_variable = ed.Independent(random_variable.distribution,
                                     reinterpreted_batch_ndims=1)
    bijector = tfp.bijectors.Inline(
        forward_fn=lambda x: tf.transpose(x, [1, 0]),
        inverse_fn=lambda y: tf.transpose(y, [1, 0]),
        forward_event_shape_fn=lambda input_shape: input_shape[::-1],
        forward_event_shape_tensor_fn=lambda input_shape: input_shape[::-1],
        inverse_log_det_jacobian_fn=lambda y: tf.cast(0, y.dtype),
        forward_min_event_ndims=2)
    random_variable = ed.TransformedDistribution(random_variable.distribution,
                                                 bijector=bijector)
    return random_variable 
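Both matrix_set_diag calls in this example apply the standard jitter trick: add a tiny epsilon to the diagonal of a kernel matrix so that the Cholesky factorization and the resulting multivariate normal stay numerically well behaved. Reduced to its essentials:

import tensorflow as tf

kmm = tf.placeholder(tf.float32, [None, None])     # kernel matrix K(Z, Z)
jitter = tf.keras.backend.epsilon()                # ~1e-7 by default
kmm_stable = tf.matrix_set_diag(kmm, tf.matrix_diag_part(kmm) + jitter)
kmm_tril = tf.linalg.cholesky(kmm_stable)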
Example #30
Source File: core.py    From parametric_tsne with MIT License
def kl_loss(y_true, y_pred, alpha=1.0, batch_size=None, num_perplexities=None, _eps=DEFAULT_EPS):
    """ Kullback-Leibler Loss function (Tensorflow)
    between the "true" output and the "predicted" output
    Parameters
    ----------
    y_true : 2d array_like (N, N*P)
        Should be the P matrix calculated from input data.
        Differences in input points using a Gaussian probability distribution
        Different P (perplexity) values stacked along dimension 1
    y_pred : 2d array_like (N, output_dims)
        Output of the neural network. We will calculate
        the Q matrix based on this output
    alpha : float, optional
        Parameter used to calculate Q. Default 1.0
    batch_size : int, required
        Number of samples per batch. y_true.shape[0]
    num_perplexities : int, required
        Number of perplexities stacked along axis 1
    Returns
    -------
    kl_loss : tf.Tensor, scalar value
        Kullback-Leibler divergence P_ || Q_

    """
    P_ = y_true
    Q_ = _make_Q(y_pred, alpha, batch_size)
    
    _tf_eps = tf.constant(_eps, dtype=P_.dtype)
    
    kls_per_beta = []
    components = tf.split(P_, num_perplexities, axis=1, name='split_perp')
    for cur_beta_P in components:
        #yrange = tf.range(zz*batch_size, (zz+1)*batch_size)
        #cur_beta_P = tf.slice(P_, [zz*batch_size, [-1, batch_size])
        #cur_beta_P = P_
        kl_matr = tf.multiply(cur_beta_P, tf.log(cur_beta_P + _tf_eps) - tf.log(Q_ + _tf_eps), name='kl_matr')
        toset = tf.constant(0, shape=[batch_size], dtype=kl_matr.dtype)
        kl_matr_keep = tf.matrix_set_diag(kl_matr, toset)
        kl_total_cost_cur_beta = tf.reduce_sum(kl_matr_keep)
        kls_per_beta.append(kl_total_cost_cur_beta)
    kl_total_cost = tf.add_n(kls_per_beta)
    #kl_total_cost = kl_total_cost_cur_beta
    
    return kl_total_cost