Python tensorflow.segment_sum() Examples

The following are 30 code examples of tensorflow.segment_sum(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module tensorflow.
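Before the examples, here is a minimal sketch of the op's semantics (TF 1.x API; the input values are made up): segment_ids must be sorted in non-decreasing order, and row i of the output is the sum of the rows of data whose segment id equals i.

import tensorflow as tf

data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
segment_ids = tf.constant([0, 0, 1])  # sorted; two segments
summed = tf.segment_sum(data, segment_ids)

with tf.Session() as sess:
    print(sess.run(summed))  # [[4. 6.]
                             #  [5. 6.]]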
Example #1
Source File: layers_copy.py    From graph_level_drug_discovery with Apache License 2.0
def call(self, x):
    """Execute this layer on input tensors.

    Parameters
    ----------
    x: list of Tensor
      should be [embedding tensor of molecules, of shape (batch_size*max_n_atoms, n_embedding),
                 atom membership (segment id) tensor, of shape (batch_size*max_n_atoms)]

    Returns
    -------
    tf.Tensor
      Of shape (batch_size)
    """
    self.build()
    output = x[0]
    atom_membership = x[1]
    for i, W in enumerate(self.W_list[:-1]):
      output = tf.matmul(output, W) + self.b_list[i]
      output = self.activation(output)
    output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
    if self.output_activation:
      output = self.activation(output)
    output = tf.segment_sum(output, atom_membership)
    return output 
Example #2
Source File: graph_layers.py    From PADME with MIT License
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    output = in_layers[0].out_tensor
    atom_membership = in_layers[1].out_tensor
    for i, W in enumerate(self.W_list[:-1]):
      output = tf.matmul(output, W) + self.b_list[i]
      output = self.activation(output)
    output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
    if self.output_activation:
      output = self.activation(output)
    output = tf.segment_sum(output, atom_membership)
    out_tensor = output
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor 
Example #3
Source File: graph_layers.py    From PADME with MIT License
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, membership
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    # Add trainable weights
    self.build()

    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    membership = in_layers[1].out_tensor
    # Sum atom features over each molecule
    graph_features = tf.segment_sum(atom_features, membership)
    # Pass the summed graph features through the dense layers
    outputs = self.DAGgraph_step(graph_features, self.W_list, self.b_list)
    out_tensor = outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor 
Example #4
Source File: layers.py    From graph_level_drug_discovery with Apache License 2.0
def call(self, x, mask=None):
    """Execute this layer on input tensors.

    x = [graph_features, membership]

    Parameters
    ----------
    x: list of tf.Tensor
      [tensor of each atom's graph features, molecule membership vector]

    Returns
    -------
    outputs: tf.Tensor
      Tensor of each molecule's features

    """
    # Add trainable weights
    self.build()
    atom_features = x[0]
    membership = x[1]
    # Sum atom features over each molecule
    graph_features = tf.segment_sum(atom_features, membership)
    # Pass the summed graph features through the dense layers
    outputs = self.DAGgraph_step(graph_features, self.W_list, self.b_list)
    return outputs 
Example #5
Source File: layers.py    From graph_level_drug_discovery with Apache License 2.0
def call(self, x):
    """Execute this layer on input tensors.

    Parameters
    ----------
    x: list of Tensor
      should be [embedding tensor of molecules, of shape (batch_size*max_n_atoms, n_embedding),
                 atom membership (segment id) tensor, of shape (batch_size*max_n_atoms)]

    Returns
    -------
    tf.Tensor
      Of shape (batch_size)
    """
    self.build()
    output = x[0]
    atom_membership = x[1]
    for i, W in enumerate(self.W_list[:-1]):
      output = tf.matmul(output, W) + self.b_list[i]
      output = self.activation(output)
    output = tf.matmul(output, self.W_list[-1]) + self.b_list[-1]
    if self.output_activation:
      output = self.activation(output)
    output = tf.segment_sum(output, atom_membership)
    return output 
Example #6
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testGradientMatchesSegmentSum(self):
    # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
    # and compare the outputs, which should be identical.
    # NB: for this test to work, the indices must be valid for SegmentSum:
    # they must be sorted and contiguous, and num_segments must be
    # max(indices) + 1.
    indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
    n = len(indices)
    num_cols = 2
    shape = [n, num_cols]
    num_segments = max(indices) + 1
    with self.test_session(use_gpu=self.use_gpu):
      tf_x, np_x = self._input(shape, dtype=tf.float64)
      # Results from UnsortedSegmentSum
      unsorted_s = tf.unsorted_segment_sum(data=tf_x,
                                           segment_ids=indices,
                                           num_segments=num_segments)
      (unsorted_jacob_t, unsorted_jacob_n) = tf.test.compute_gradient(
          tf_x,
          shape,
          unsorted_s,
          [num_segments, num_cols],
          x_init_value=np_x.astype(np.double),
          delta=1)
      # Results from SegmentSum
      sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
      sorted_jacob_t, sorted_jacob_n = tf.test.compute_gradient(
          tf_x,
          shape,
          sorted_s,
          [num_segments, num_cols],
          x_init_value=np_x.astype(np.double),
          delta=1)
    self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
    self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3) 
Example #7
Source File: ops.py    From document-qa with Apache License 2.0
def segment_logsumexp(xs, segments):
    """ Similar tf.segment_sum but compute logsumexp rather then sum """
    # Stop gradients following the implementation of tf.reduce_logsumexp
    maxs = tf.stop_gradient(tf.reduce_max(xs, axis=1))
    segment_maxes = tf.segment_max(maxs, segments)
    xs -= tf.expand_dims(tf.gather(segment_maxes, segments), 1)
    sums = tf.reduce_sum(tf.exp(xs), axis=1)
    return tf.log(tf.segment_sum(sums, segments)) + segment_maxes 
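A hypothetical call to this helper (made-up values): xs holds one row of scores per item and segments assigns each row to a group; the result is one logsumexp value per group, computed stably by first subtracting each group's maximum.

xs = tf.constant([[1.0, 2.0], [0.5, 0.5], [3.0, 1.0]])
segments = tf.constant([0, 0, 1])
out = segment_logsumexp(xs, segments)  # shape (2,): logsumexp over rows {0, 1} and row {2}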
Example #8
Source File: utils.py    From PiNN with BSD 3-Clause "New" or "Revised" License
def get_atomic_dress(dataset, elems, max_iter=None):
    """Fit the atomic energy with a element dependent atomic dress

    Args:
        dataset: dataset to fit
        elems: a list of element numbers
    Returns:
        atomic_dress: a dictionary comprising the atomic energy of each element
        error: residual error of the atomic dress
    """
    tensors = dataset.make_one_shot_iterator().get_next()
    if 'ind_1' not in tensors:
        tensors['ind_1'] = tf.expand_dims(tf.zeros_like(tensors['elems']), 1)
        tensors['e_data'] = tf.expand_dims(tensors['e_data'], 0)
    count = tf.equal(tf.expand_dims(
        tensors['elems'], 1), tf.expand_dims(elems, 0))
    count = tf.cast(count, tf.int32)
    count = tf.segment_sum(count, tensors['ind_1'][:, 0])
    sess = tf.Session()
    x, y = [], []
    it = 0
    while True:
        it += 1
        if max_iter is not None and it > max_iter:
            break
        try:
            x_i, y_i = sess.run((count, tensors['e_data']))
            x.append(x_i)
            y.append(y_i)
        except tf.errors.OutOfRangeError:
            break
    x, y = np.concatenate(x, 0), np.concatenate(y, 0)
    beta = np.dot(np.dot(np.linalg.pinv(np.dot(x.T, x)), x.T), np.array(y))
    dress = {e: float(beta[i]) for (i, e) in enumerate(elems)}
    error = np.dot(x, beta) - y
    return dress, error 
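The beta line solves the least-squares fit via the normal equations, beta = (XᵀX)⁺Xᵀy, where each row of x counts the atoms of each element in a structure; up to numerical details, np.linalg.lstsq(x, y, rcond=None)[0] would compute the same coefficients more directly.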
Example #9
Source File: NeuralNetwork.py    From PhysNet with MIT License
def electrostatic_energy_per_atom(self, Dij, Qa, idx_i, idx_j):
        #gather charges
        Qi = tf.gather(Qa, idx_i)
        Qj = tf.gather(Qa, idx_j)
        #calculate variants of Dij which we need to calculate
        #the various shielded/non-shielded potentials
        DijS = tf.sqrt(Dij*Dij + 1.0) #shielded distance
        #calculate value of switching function
        switch = self._switch(Dij) #normal switch
        cswitch = 1.0-switch #complementary switch
        #calculate shielded/non-shielded potentials
        if self.lr_cut is None: #no non-bonded cutoff
            Eele_ordinary = 1.0/Dij   #ordinary electrostatic energy
            Eele_shielded = 1.0/DijS  #shielded electrostatic energy
            #combine shielded and ordinary interactions and apply prefactors 
            Eele = self.kehalf*Qi*Qj*(cswitch*Eele_shielded + switch*Eele_ordinary)
        else: #with non-bonded cutoff
            cut   = self.lr_cut
            cut2  = self.lr_cut*self.lr_cut
            Eele_ordinary = 1.0/Dij  +  Dij/cut2 - 2.0/cut
            Eele_shielded = 1.0/DijS + DijS/cut2 - 2.0/cut
            #combine shielded and ordinary interactions and apply prefactors 
            Eele = self.kehalf*Qi*Qj*(cswitch*Eele_shielded + switch*Eele_ordinary)
            Eele = tf.where(Dij <= cut, Eele, tf.zeros_like(Eele))
        return tf.segment_sum(Eele, idx_i) 

    #save the current model 
Example #10
Source File: NeuralNetwork.py    From PhysNet with MIT License
def scaled_charges(self, Z, Qa, Q_tot=None, batch_seg=None):
        with tf.name_scope("scaled_charges"):
            if batch_seg is None:
                batch_seg = tf.zeros_like(Z)
            #number of atoms per batch (needed for charge scaling)
            Na_per_batch = tf.segment_sum(tf.ones_like(batch_seg, dtype=self.dtype), batch_seg)
            if Q_tot is None: #assume desired total charge zero if not given
                Q_tot = tf.zeros_like(Na_per_batch, dtype=self.dtype)
            #return scaled charges (such that they have the desired total charge)
            return Qa + tf.gather(((Q_tot-tf.segment_sum(Qa, batch_seg))/Na_per_batch), batch_seg)

    #switch function for electrostatic interaction (switches between shielded and unshielded electrostatic interaction) 
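The correction term in scaled_charges spreads each molecule's charge deficit evenly over its atoms, so the scaled charges sum exactly to the desired total. A quick arithmetic check with made-up values for a neutral molecule (Q_tot = 0) of two atoms: Qa = [0.3, -0.1] sums to 0.2, the per-atom correction is (0 - 0.2)/2 = -0.1, and the scaled charges [0.2, -0.2] sum to 0 as desired.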
Example #11
Source File: NeuralNetwork.py    From PhysNet with MIT License
def energy_from_scaled_atomic_properties(self, Ea, Qa, Dij, Z, idx_i, idx_j, batch_seg=None):
        with tf.name_scope("energy_from_atomic_properties"):
            if batch_seg is None:
                batch_seg = tf.zeros_like(Z)
            #add electrostatic and dispersion contribution to atomic energy
            if self.use_electrostatic:
                Ea += self.electrostatic_energy_per_atom(Dij, Qa, idx_i, idx_j)
            if self.use_dispersion:
                if self.lr_cut is not None:   
                    Ea += d3_autoev*edisp(Z, Dij/d3_autoang, idx_i, idx_j, s6=self.s6, s8=self.s8, a1=self.a1, a2=self.a2, cutoff=self.lr_cut/d3_autoang)
                else:
                    Ea += d3_autoev*edisp(Z, Dij/d3_autoang, idx_i, idx_j, s6=self.s6, s8=self.s8, a1=self.a1, a2=self.a2)
        return tf.squeeze(tf.segment_sum(Ea, batch_seg))

    #calculates the energy and forces given the scaled atomic properties (in order to prevent recomputation if atomic properties are calculated) 
Example #12
Source File: grimme_d3.py    From PhysNet with MIT License
def _ncoord(Zi, Zj, r, idx_i, cutoff=None, k1=d3_k1, rcov=d3_rcov):
    '''
    compute coordination numbers by adding an inverse damping function
    '''
    rco = tf.gather(rcov,Zi) + tf.gather(rcov,Zj)
    rr = tf.cast(rco,r.dtype)/r
    damp = 1.0/(1.0+tf.exp(-k1*(rr-1.0)))
    if cutoff is not None:
        damp *= _smootherstep(r, cutoff)
    return tf.segment_sum(damp,idx_i) 
Example #13
Source File: pooling.py    From SchNet with MIT License
def __init__(self, mode='sum', name=None):
        if mode == 'sum':
            self._reduce = tf.segment_sum
        elif mode == 'mean':
            self._reduce = tf.segment_mean
        super(PoolSegments, self).__init__(name) 
Example #14
Source File: models.py    From scene-graph-TF-release with MIT License
def _compute_vert_context_soft(self, edge_factor, vert_factor, reuse=False):
        """
        attention-based vertex (node) message pooling
        """

        out_edge = utils.pad_and_gather(edge_factor, self.edge_pair_mask_inds[:,0])
        in_edge = utils.pad_and_gather(edge_factor, self.edge_pair_mask_inds[:,1])
        # gather corresponding vert factors
        vert_factor_gathered = tf.gather(vert_factor, self.edge_pair_segment_inds)

        # concat outgoing edges and ingoing edges with gathered vert_factors
        out_edge_w_input = tf.concat(concat_dim=1, values=[out_edge, vert_factor_gathered])
        in_edge_w_input = tf.concat(concat_dim=1, values=[in_edge, vert_factor_gathered])

        # compute compatibility scores
        (self.feed(out_edge_w_input)
             .fc(1, relu=False, reuse=reuse, name='out_edge_w_fc')
             .sigmoid(name='out_edge_score'))
        (self.feed(in_edge_w_input)
             .fc(1, relu=False, reuse=reuse, name='in_edge_w_fc')
             .sigmoid(name='in_edge_score'))

        out_edge_w = self.get_output('out_edge_score')
        in_edge_w = self.get_output('in_edge_score')

        # weight the edge factors with the computed weights
        out_edge_weighted = tf.mul(out_edge, out_edge_w)
        in_edge_weighted = tf.mul(in_edge, in_edge_w)


        edge_sum = out_edge_weighted + in_edge_weighted
        vert_ctx = tf.segment_sum(edge_sum, self.edge_pair_segment_inds)
        return vert_ctx 
Example #15
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsInvalid7(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 0, 0, -2]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment ids must be >= 0"):
        s.eval() 
Example #16
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsSize(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 1]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment_ids should be the same size"):
        s.eval() 
Example #17
Source File: graph_layers.py    From PADME with MIT License
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ 
    parent layers: atom_features, atom_split
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    outputs = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    output_molecules = tf.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)

    out_tensor = output_molecules
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor 
Example #18
Source File: graph_layers.py    From PADME with MIT License
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_features = in_layers[0].out_tensor
    distance = in_layers[1].out_tensor
    distance_membership_i = in_layers[2].out_tensor
    distance_membership_j = in_layers[3].out_tensor
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(distance_hidden,
                          tf.gather(atom_features_hidden,
                                    distance_membership_j))

    # for atom i in a molecule m, this step multiplies together distance info of atom pair (i, j)
    # and embeddings of atom j (both passed through a hidden layer)
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation(outputs)

    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation(output_ii)

    # for atom i, sum the influence from all other atom j in the molecule
    outputs = tf.segment_sum(outputs,
                             distance_membership_i) - output_ii + atom_features
    out_tensor = outputs
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor 
Example #19
Source File: graph_layers.py    From PADME with MIT License
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform M steps of set2set gather,
        detailed descriptions in: https://arxiv.org/abs/1511.06391 """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    self.c = tf.zeros((self.batch_size, self.n_hidden))
    self.h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
      q_expanded = tf.gather(self.h, atom_split)
      e = tf.reduce_sum(atom_features * q_expanded, 1)
      e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
      # Add another value (~ -Inf) to prevent errors in the softmax
      e_mols = [
          tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
      ]
      a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
      r = tf.segment_sum(tf.reshape(a, [-1, 1]) * atom_features, atom_split)
      # Models using this layer must set pad_batches=True
      q_star = tf.concat([self.h, r], axis=1)
      self.h, self.c = self.LSTMStep(q_star, self.c)

    out_tensor = q_star
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor 
Example #20
Source File: ops.py    From tfdeploy with MIT License
def test_SegmentSum(self):
        t = tf.segment_sum(self.random(4, 2, 3), np.array([0, 1, 1, 2]))
        self.check(t) 
Example #21
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testValues(self):
    dtypes = [tf.float32,
              tf.float64,
              tf.int64,
              tf.int32,
              tf.complex64,
              tf.complex128]

    # Each item is np_op1, np_op2, tf_op
    ops_list = [(np.add, None, tf.segment_sum),
                (self._mean_cum_op, self._mean_reduce_op,
                 tf.segment_mean),
                (np.ndarray.__mul__, None, tf.segment_prod),
                (np.minimum, None, tf.segment_min),
                (np.maximum, None, tf.segment_max)]

    # A subset of ops has been enabled for complex numbers
    complex_ops_list = [(np.add, None, tf.segment_sum),
                        (np.ndarray.__mul__, None, tf.segment_prod)]

    n = 10
    shape = [n, 2]
    indices = [i // 3 for i in range(n)]
    for dtype in dtypes:
      if dtype in (tf.complex64, tf.complex128):
        curr_ops_list = complex_ops_list
      else:
        curr_ops_list = ops_list

      with self.test_session(use_gpu=False):
        tf_x, np_x = self._input(shape, dtype=dtype)
        for np_op1, np_op2, tf_op in curr_ops_list:
          np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
          s = tf_op(data=tf_x, segment_ids=indices)
          tf_ans = s.eval()
          self._assertAllClose(indices, np_ans, tf_ans)
          # NOTE(mrry): The static shape inference that computes
          # `tf_ans.shape` can only infer the sizes from dimension 1
          # onwards, because the size of dimension 0 is data-dependent
          # and may therefore vary dynamically.
          self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:]) 
Example #22
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsShape(self):
    shape = [4, 4]
    tf_x, _ = self._input(shape)
    indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
    with self.assertRaises(ValueError):
      tf.segment_sum(data=tf_x, segment_ids=indices) 
Example #23
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsInvalid6(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 0, 0, -1]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment ids must be >= 0"):
        s.eval() 
Example #24
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsValid(self):
    # This is a baseline for the following SegmentIdsInvalid* tests.
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 0, 0, 1]
      result = tf.segment_sum(data=tf_x, segment_ids=indices).eval()
      self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result) 
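The expected result is consistent with _input([4, 4]) filling the tensor with the values 1..16 in row-major order (an assumption about the test fixture, noted in the comment below); a NumPy sketch of the same reduction:

import numpy as np

x = np.arange(1.0, 17.0).reshape(4, 4)     # assumed contents of _input([4, 4])
# rows 0-2 form segment 0, row 3 forms segment 1
print(np.add.reduceat(x, [0, 3], axis=0))  # [[15. 18. 21. 24.]
                                           #  [13. 14. 15. 16.]]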
Example #25
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsInvalid1(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [-1, -1, 0, 0]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment ids do not start at 0"):
        s.eval() 
Example #26
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsInvalid3(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 0, 2, 2]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment ids are not increasing by 1"):
        s.eval() 
Example #27
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsInvalid4(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 1, 0, 1]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment ids are not increasing by 1"):
        s.eval() 
Example #28
Source File: segment_reduction_ops_test.py    From deep_image_model with Apache License 2.0
def testSegmentIdsInvalid5(self):
    shape = [4, 4]
    with self.test_session():
      tf_x, _ = self._input(shape)
      indices = [0, 1, 2, 0]
      s = tf.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError(
          r"Segment id 1 out of range \[0, 1\), probably "
          "because 'segment_ids' input is not sorted."):
        s.eval() 
Example #29
Source File: grimme_d3.py    From PhysNet with MIT License
def edisp(Z, r, idx_i, idx_j, cutoff=None, r2=None, 
    r6=None, r8=None, s6=d3_s6, s8=d3_s8, a1=d3_a1, a2=d3_a2, k1=d3_k1, k2=d3_k2, 
    k3=d3_k3, c6ab=d3_c6ab, r0ab=d3_r0ab, rcov=d3_rcov, r2r4=d3_r2r4):
    '''
    compute d3 dispersion energy in Hartree
    r: distance in bohr!
    '''
    #compute all necessary quantities
    Zi = tf.gather(Z, idx_i)
    Zj = tf.gather(Z, idx_j)
    ZiZj = tf.stack([Zi,Zj],axis=1) #necessary for gathering
    nc = _ncoord(Zi, Zj, r, idx_i, cutoff=cutoff, rcov=rcov) #coordination numbers
    nci = tf.gather(nc, idx_i)
    ncj = tf.gather(nc, idx_j)
    c6 = _getc6(ZiZj, nci, ncj, c6ab=c6ab, k3=k3) #c6 coefficients
    c8 = 3*c6*tf.cast(tf.gather(r2r4, Zi),c6.dtype)*tf.cast(tf.gather(r2r4, Zj),c6.dtype) #c8 coefficient
    
    #compute all necessary powers of the distance
    if r2 is None:
        r2 = r**2 #square of distances
    if r6 is None:
        r6 = r2**3
    if r8 is None:
        r8 = r6*r2

    #Becke-Johnson damping, zero-damping introduces spurious repulsion
    #and is therefore not supported/implemented
    tmp = a1*tf.sqrt(c8/c6) + a2
    tmp2 = tmp**2
    tmp6 = tmp2**3
    tmp8 = tmp6*tmp2
    if cutoff is None:
        e6 = 1/(r6+tmp6)
        e8 = 1/(r8+tmp8)
    else: #apply cutoff
        cut2 = cutoff**2
        cut6 = cut2**3
        cut8 = cut6*cut2
        cut6tmp6 = cut6 + tmp6
        cut8tmp8 = cut8 + tmp8
        e6 = 1/(r6+tmp6) - 1/cut6tmp6 + 6*cut6/cut6tmp6**2 * (r/cutoff-1)
        e8 = 1/(r8+tmp8) - 1/cut8tmp8 + 8*cut8/cut8tmp8**2 * (r/cutoff-1)
        e6 = tf.where(r < cutoff, e6, tf.zeros_like(e6))
        e8 = tf.where(r < cutoff, e8, tf.zeros_like(e8))
    e6 = -0.5*s6*c6*e6
    e8 = -0.5*s8*c8*e8
    return tf.segment_sum(e6+e8,idx_i) 
Example #30
Source File: utils.py    From GenerativeAdversarialUserModel with MIT License
def construct_computation_graph(self):

        denseshape = [self.placeholder['section_length'], self.placeholder['item_size']]

        # (1) history feature --- net ---> clicked_feature
        # (1) construct cumulative history
        click_history = [[] for _ in xrange(self.pw_dim)]
        for ii in xrange(self.pw_dim):
            position_weight = tf.get_variable('p_w'+str(ii), [self.band_size], initializer=tf.constant_initializer(0.0001))
            cumsum_tril_value = tf.gather(position_weight, self.placeholder['cumsum_tril_value_indices'])
            cumsum_tril_matrix = tf.SparseTensor(self.placeholder['cumsum_tril_indices'], cumsum_tril_value,
                                                 [self.placeholder['section_length'], self.placeholder['section_length']])  # sec by sec
            click_history[ii] = tf.sparse_tensor_dense_matmul(cumsum_tril_matrix, self.placeholder['Xs_clicked'])  # Xs_clicked: section by _f_dim
        concat_history = tf.concat(click_history, axis=1)
        disp_history_feature = tf.gather(concat_history, self.placeholder['disp_2d_split_sec_ind'])

        # (4) combine features
        concat_disp_features = tf.reshape(tf.concat([disp_history_feature, self.placeholder['disp_current_feature']], axis=1),
                                          [-1, self.f_dim * self.pw_dim + self.f_dim])

        # (5) compute utility
        u_disp = mlp(concat_disp_features, self.hidden_dims, 1, tf.nn.elu, 1e-3, act_last=False)

        # (5)
        exp_u_disp = tf.exp(u_disp)
        sum_exp_disp_ubar_ut = tf.segment_sum(exp_u_disp, self.placeholder['disp_2d_split_sec_ind'])
        sum_click_u_bar_ut = tf.gather(u_disp, self.placeholder['click_2d_subindex'])

        # (6) loss and precision
        click_tensor = tf.SparseTensor(self.placeholder['click_indices'], self.placeholder['click_values'], denseshape)
        click_cnt = tf.sparse_reduce_sum(click_tensor, axis=1)
        loss_sum = tf.reduce_sum(- sum_click_u_bar_ut + tf.log(sum_exp_disp_ubar_ut + 1))
        event_cnt = tf.reduce_sum(click_cnt)
        loss = loss_sum / event_cnt

        exp_disp_ubar_ut = tf.SparseTensor(self.placeholder['disp_indices'], tf.reshape(exp_u_disp, [-1]), denseshape)
        dense_exp_disp_util = tf.sparse_tensor_to_dense(exp_disp_ubar_ut, default_value=0.0, validate_indices=False)
        argmax_click = tf.argmax(tf.sparse_tensor_to_dense(click_tensor, default_value=0.0), axis=1)
        argmax_disp = tf.argmax(dense_exp_disp_util, axis=1)

        top_2_disp = tf.nn.top_k(dense_exp_disp_util, k=2, sorted=False)[1]

        precision_1_sum = tf.reduce_sum(tf.cast(tf.equal(argmax_click, argmax_disp), tf.float32))
        precision_1 = precision_1_sum / event_cnt
        precision_2_sum = tf.reduce_sum(tf.cast(tf.equal(tf.reshape(argmax_click, [-1, 1]), tf.cast(top_2_disp, tf.int64)), tf.float32))
        precision_2 = precision_2_sum / event_cnt

        self.lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * 0.05  # regularity
        return loss, precision_1, precision_2, loss_sum, precision_1_sum, precision_2_sum, event_cnt
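The segment_sum call in step (5) above builds a per-section softmax denominator. A stripped-down sketch of that pattern with made-up utilities and section indices (TF 1.x API):

import tensorflow as tf

u = tf.constant([1.0, 2.0, 0.5, 3.0])  # utilities of displayed items
sec = tf.constant([0, 0, 1, 1])        # sorted section id of each item
exp_u = tf.exp(u)
denom = tf.segment_sum(exp_u, sec)     # one denominator per section
probs = exp_u / tf.gather(denom, sec)  # softmax within each section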