Python tensorflow.keras.backend.dot() Examples

The following are 29 code examples of tensorflow.keras.backend.dot(), drawn from open-source projects. The source file and project are listed above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.backend.
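As a quick orientation before the examples (this sketch is not one of them, and the shapes are arbitrary): K.dot(x, y) is an ordinary matrix product for 2-D tensors; for higher-rank inputs it follows Theano-style behavior and contracts the last axis of x with the second-to-last axis of y.

import numpy as np
from tensorflow.keras import backend as K

a = K.constant(np.random.rand(2, 3))
b = K.constant(np.random.rand(3, 4))
print(K.int_shape(K.dot(a, b)))   # (2, 4): an ordinary matrix product

x = K.constant(np.random.rand(5, 7, 3))
print(K.int_shape(K.dot(x, b)))   # (5, 7, 4): the last axis of x is contracted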
Example #1
Source File: set2set.py    From megnet with BSD 3-Clause "New" or "Revised" License
def _lstm(self, h, c):
        # Fused LSTM step: a single dot product yields all four gate
        # pre-activations, which are then split along the last axis.
        z = kb.dot(h, self.recurrent_kernel)
        if self.use_bias:
            z += self.recurrent_bias
        z0 = z[:, :, :self.n_hidden]                       # input gate
        z1 = z[:, :, self.n_hidden:2 * self.n_hidden]      # forget gate
        z2 = z[:, :, 2 * self.n_hidden:3 * self.n_hidden]  # cell candidate
        z3 = z[:, :, 3 * self.n_hidden:]                   # output gate
        i = self.recurrent_activation(z0)
        f = self.recurrent_activation(z1)
        c = f * c + i * self.activation_lstm(z2)
        o = self.recurrent_activation(z3)
        h = o * self.activation_lstm(c)
        return h, c 
Example #2
Source File: global_pool.py    From spektral with MIT License
def call(self, inputs):
        if self.data_mode == 'disjoint':
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        attn_coeff = K.dot(X, self.attn_kernel)
        attn_coeff = K.squeeze(attn_coeff, -1)
        attn_coeff = K.softmax(attn_coeff)
        if self.data_mode == 'single':
            output = K.dot(attn_coeff[None, ...], X)
        elif self.data_mode == 'batch':
            output = K.batch_dot(attn_coeff, X)
        else:
            output = attn_coeff[:, None] * X
            output = tf.math.segment_sum(output, I)

        return output 
Example #3
Source File: graph_conv_skip.py    From spektral with MIT License
def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        # Convolution
        output = K.dot(features, self.kernel_1)
        output = ops.filter_dot(fltr, output)

        # Skip connection
        skip = K.dot(features, self.kernel_2)
        output += skip

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output 
Example #4
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _mask_rotation_matrix_zyz(self, params):
        phi = params[0] * 2 * np.pi - np.pi
        theta = params[1] * 2 * np.pi - np.pi
        psi_t = params[2] * 2 * np.pi - np.pi

        loc_r = params[3:6] * 0    # the magnitude of the Fourier transform is translation-invariant

        a1 = self._rotation_matrix_axis(2, psi_t)
        a2 = self._rotation_matrix_axis(1, theta)
        a3 = self._rotation_matrix_axis(2, phi)
        rm = K.dot(K.dot(a3, a2), a1)

        rm = tf.transpose(rm)

        c = K.dot(-rm, K.expand_dims(loc_r))

        rm = K.flatten(rm)

        # 3x4 affine parameters: rotation rows interleaved with translation entries
        theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])

        return theta 
Example #5
Source File: time_frequency.py    From kapre with MIT License
def call(self, x):
        power_spectrogram = super(Melspectrogram, self).call(x)
        # now,  channels_first: (batch_sample, n_ch, n_freq, n_time)
        #       channels_last: (batch_sample, n_freq, n_time, n_ch)
        if self.image_data_format == 'channels_first':
            power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 1, 3, 2])
        else:
            power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 3, 2, 1])
        # now, whatever image_data_format, (batch_sample, n_ch, n_time, n_freq)
        output = K.dot(power_spectrogram, self.freq2mel)
        if self.image_data_format == 'channels_first':
            output = K.permute_dimensions(output, [0, 1, 3, 2])
        else:
            output = K.permute_dimensions(output, [0, 3, 2, 1])
        if self.power_melgram != 2.0:
            output = K.pow(K.sqrt(output), self.power_melgram)
        if self.return_decibel_melgram:
            output = backend_keras.amplitude_to_decibel(output)
        return output 
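The pattern above (also used in Example #26 below) is worth noting: permute the axis to be transformed to the end, apply K.dot with a 2-D matrix, which contracts that last axis, then permute back. A shape-only sketch with arbitrary sizes:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.random.rand(2, 3, 100, 50))    # (batch, ch, n_freq, n_time), channels_first
x = K.permute_dimensions(x, [0, 1, 3, 2])        # -> (batch, ch, n_time, n_freq)
freq2mel = K.constant(np.random.rand(100, 40))   # (n_freq, n_mels)
y = K.dot(x, freq2mel)                           # contracts the last axis -> (2, 3, 50, 40)
y = K.permute_dimensions(y, [0, 1, 3, 2])        # back to (batch, ch, n_mels, n_time)
print(K.int_shape(y))                            # (2, 3, 40, 50)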
Example #6
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _rotation_matrix_zyz(self, params):
        phi = params[0] * 2 * np.pi - np.pi
        theta = params[1] * 2 * np.pi - np.pi
        psi_t = params[2] * 2 * np.pi - np.pi

        loc_r = params[3:6] * 2 - 1

        a1 = self._rotation_matrix_axis(2, psi_t)   # first rotate about the z axis by angle psi_t
        a2 = self._rotation_matrix_axis(1, theta)
        a3 = self._rotation_matrix_axis(2, phi)
        rm = K.dot(K.dot(a3, a2), a1)

        rm = tf.transpose(rm)

        c = K.dot(-rm, K.expand_dims(loc_r))

        rm = K.flatten(rm)

        theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])

        return theta 
Example #7
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='mult-kernel',
                                      shape=(np.prod(self.orig_input_shape),
                                             self.output_len),
                                      initializer=self.kernel_initializer,
                                      trainable=True)

        # Precompute the left pseudo-inverse of the kernel: W = (M^T M)^{-1} M^T
        M = K.reshape(self.kernel, [-1, self.output_len])  # D x d
        mt = K.transpose(M)  # d x D
        mtm_inv = tf.linalg.inv(K.dot(mt, M))  # d x d
        self.W = K.dot(mtm_inv, mt)  # d x D

        if self.use_bias:
            self.bias = self.add_weight(name='bias-kernel',
                                        shape=(self.output_len, ),
                                        initializer=self.bias_initializer,
                                        trainable=True)

        super(SpatiallySparse_Dense, self).build(input_shape)  # Be sure to call this somewhere! 
Example #8
Source File: cells.py    From DeepPavlov with Apache License 2.0
def call(self, inputs, **kwargs):
        assert isinstance(inputs, list) and len(inputs) == 3
        first, second, features = inputs[0], inputs[1], inputs[2]
        if not self.from_logits:
            first = K.clip(first, 1e-10, 1.0)
            second = K.clip(second, 1e-10, 1.0)
            first_, second_ = K.log(first), K.log(second)
        else:
            first_, second_ = first, second
        # embedded_features.shape = (M, T, 1)
        if self.use_intermediate_layer:
            features = K.dot(features, self.first_kernel)
            features = K.bias_add(features, self.first_bias, data_format="channels_last")
            features = self.intermediate_activation(features)
        embedded_features = K.dot(features, self.features_kernel)
        embedded_features = K.bias_add(
            embedded_features, self.features_bias, data_format="channels_last")
        if self.use_dimension_bias:
            tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
            embedded_features = K.tile(embedded_features, tiling_shape)
            embedded_features = K.bias_add(
                embedded_features, self.dimensions_bias, data_format="channels_last")
        sigma = K.sigmoid(embedded_features)

        result = weighted_sum(first_, second_, sigma,
                              self.first_threshold, self.second_threshold)
        probs = K.softmax(result)
        if self.return_logits:
            return [probs, result]
        return probs 
Example #9
Source File: cells.py    From DeepPavlov with Apache License 2.0
def call(self, inputs, **kwargs):
        gate = K.dot(inputs, self.gate_kernel)
        gate = K.bias_add(gate, self.gate_bias, data_format="channels_last")
        gate = self.activation(gate)
        new_value = K.dot(inputs, self.dense_kernel)
        new_value = K.bias_add(new_value, self.dense_bias, data_format="channels_last")
        return gate * new_value + (1.0 - gate) * inputs 
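Example #9 is a highway-style layer: the learned gate interpolates elementwise between the transformed input and the raw input. A toy numeric check of that interpolation (values are illustrative):

import numpy as np

gate = np.array([0.9, 0.1])
new_value = np.array([5.0, 5.0])
inputs = np.array([1.0, 1.0])
print(gate * new_value + (1.0 - gate) * inputs)  # [4.6 1.4]: gate near 1 passes the transform, near 0 the input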
Example #10
Source File: bilstm_siamese_network.py    From DeepPavlov with Apache License 2.0
def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
        emb_c, emb_r = inputs
        bs = K.shape(emb_c)[0]
        embeddings = K.concatenate([emb_c, emb_r], 0)
        dot_product = K.dot(embeddings, K.transpose(embeddings))
        square_norm = K.batch_dot(embeddings, embeddings, axes=1)
        distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
        distances = distances[0:bs, bs:bs+bs]
        distances = K.clip(distances, 0.0, None)
        mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
        distances = distances + mask * 1e-16
        distances = K.sqrt(distances)
        distances = distances * (1.0 - mask)
        return distances 
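Example #10 builds all pairwise Euclidean distances from a single K.dot via the identity ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2; the mask keeps K.sqrt away from exact zeros, whose gradient is infinite. A self-contained sketch of the same trick (shapes are arbitrary):

import numpy as np
from tensorflow.keras import backend as K

emb = K.constant(np.random.rand(4, 8))            # 4 embeddings of dim 8
dot_product = K.dot(emb, K.transpose(emb))        # (4, 4) matrix of a.b
square_norm = K.batch_dot(emb, emb, axes=1)       # (4, 1) column of ||a||^2
d2 = K.transpose(square_norm) - 2.0 * dot_product + square_norm
d2 = K.clip(d2, 0.0, None)                        # clamp tiny negatives from round-off
print(K.eval(K.sqrt(d2 + 1e-16)))                 # approximate pairwise distances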
Example #11
Source File: cgcnn.py    From megnet with BSD 3-Clause "New" or "Revised" License
def _mlp(self, input_, weights, bias):
        output = kb.dot(input_, weights) + bias
        return output 
Example #12
Source File: megnet.py    From megnet with BSD 3-Clause "New" or "Revised" License
def _mlp(self, input_, weights, biases):
        if biases is None:
            biases = [0] * len(weights)
        act = input_
        for w, b in zip(weights, biases):
            output = kb.dot(act, w) + b
            act = self.activation(output)
        # note: the final layer's pre-activation output is returned
        return output 
Example #13
Source File: schnet.py    From megnet with BSD 3-Clause "New" or "Revised" License
def _mlp(self, input_, weights, bias):
        output = kb.dot(input_, weights) + bias
        return output 
Example #14
Source File: set2set.py    From megnet with BSD 3-Clause "New" or "Revised" License
def call(self, inputs, mask=None):
        features, feature_graph_index = inputs
        feature_graph_index = tf.reshape(feature_graph_index, (-1,))
        _, _, count = tf.unique_with_counts(feature_graph_index)
        m = kb.dot(features, self.m_weight)
        if self.use_bias:
            m += self.m_bias

        self.h = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
        self.c = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
        q_star = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], 2 * self.n_hidden]))
        for i in range(self.T):
self.h, self.c = self._lstm(q_star, self.c)
            e_i_t = tf.reduce_sum(
                input_tensor=m * repeat_with_index(self.h, feature_graph_index), axis=-1)
            exp = tf.exp(e_i_t)
            # softmax over the nodes within each graph: per-graph sums of exp(...)
            seg_sum = tf.transpose(
                a=tf.math.segment_sum(
                    tf.transpose(a=exp, perm=[1, 0]),
                    feature_graph_index),
                perm=[1, 0])
            seg_sum = tf.expand_dims(seg_sum, axis=-1)
            interm = repeat_with_index(seg_sum, feature_graph_index)
            a_i_t = exp / interm[..., 0]
            r_t = tf.transpose(a=tf.math.segment_sum(
                tf.transpose(a=tf.multiply(m, a_i_t[:, :, None]), perm=[1, 0, 2]),
                feature_graph_index), perm=[1, 0, 2])
            q_star = kb.concatenate([self.h, r_t], axis=-1)
        return q_star 
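The loop above implements a softmax over the nodes of each graph: exponentiate the scores, sum them per graph with tf.math.segment_sum, broadcast the sums back to the nodes, and divide. A minimal 1-D sketch of that segment softmax (illustrative only; segment ids must be sorted):

import tensorflow as tf

scores = tf.constant([1.0, 2.0, 3.0, 1.0])             # per-node attention scores
seg = tf.constant([0, 0, 0, 1])                        # node -> graph index
exp = tf.exp(scores)
denom = tf.gather(tf.math.segment_sum(exp, seg), seg)  # per-graph sums, broadcast back
print((exp / denom).numpy())                           # sums to 1 within each graph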
Example #15
Source File: test_save_load_model.py    From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, **kwargs):
        return K.dot(inputs, self.kernel) 
Example #16
Source File: attention.py    From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, mask=None):
        """
        convert to query, key, value vectors, shaped [batch_size*num_head, time_step, embed_dim]
        """
        multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q),
                                                 self.num_heads, axis=2), axis=0)
        multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k),
                                               self.num_heads, axis=2), axis=0)
        multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v),
                                                 self.num_heads, axis=2), axis=0)

        """scaled dot product"""
        scaled = K.int_shape(inputs)[-1] ** -0.5
        attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled
        # apply mask before normalization (softmax)
        if mask is not None:
            multihead_mask = K.tile(mask, [self.num_heads, 1])
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)
        # normalization
        attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
        # apply attention
        attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
        attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
        attend = K.dot(attend, self.w_final)

        if self.residual:
            attend = attend + inputs
        if self.normalize:
            mean = K.mean(attend, axis=-1, keepdims=True)
            std = K.std(attend, axis=-1, keepdims=True)
            attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta

        return attend 
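Example #16 realizes multi-head attention by splitting each projection along the feature axis into num_heads pieces and stacking them along the batch axis, so a single K.batch_dot attends all heads at once. A shape-only sketch of that reshuffling (sizes are arbitrary):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

num_heads = 4
x = K.constant(np.random.rand(2, 10, 32))                      # (batch, time, embed)
heads = K.concatenate(tf.split(x, num_heads, axis=2), axis=0)  # (batch * heads, time, embed / heads)
print(K.int_shape(heads))                                      # (8, 10, 8)
back = tf.concat(tf.split(heads, num_heads, axis=0), axis=2)   # inverse: back to (2, 10, 32)
print(K.int_shape(back))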
Example #17
Source File: shapelets.py    From tslearn with BSD 2-Clause "Simplified" License
def call(self, x, **kwargs):
        # (x - y)^2 = x^2 + y^2 - 2 * x * y
        x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
        y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1),
                         (1, 1, self.n_shapelets))
        xy = K.dot(x, K.transpose(self.kernel))
        return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1] 
Example #18
Source File: noisy_dense.py    From tf2rl with MIT License
def call(self, inputs):
        # Implement Eq.(9)
        perturbed_kernel = self.kernel + \
            self.sigma_kernel * K.random_uniform(shape=self.kernel_shape)
        outputs = K.dot(inputs, perturbed_kernel)
        if self.use_bias:
            perturbed_bias = self.bias + \
                self.sigma_bias * K.random_uniform(shape=self.bias_shape)
            outputs = K.bias_add(outputs, perturbed_bias)
        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs 
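Example #18 samples fresh noise on every forward pass, so repeated calls with the same input generally differ; the layer computes y = x . (W + sigma_W * eps) + (b + sigma_b * eps_b). A standalone sketch of the kernel perturbation (all names and shapes here are illustrative):

import numpy as np
from tensorflow.keras import backend as K

kernel = K.constant(np.ones((3, 2)))
sigma_kernel = K.constant(0.1 * np.ones((3, 2)))
inputs = K.constant(np.ones((1, 3)))
perturbed = kernel + sigma_kernel * K.random_uniform(shape=(3, 2))
print(K.eval(K.dot(inputs, perturbed)))  # differs from run to run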
Example #19
Source File: topk_pool.py    From spektral with MIT License
def compute_scores(self, X, A, I):
        return K.dot(X, K.l2_normalize(self.kernel)) 
Example #20
Source File: sag_pool.py    From spektral with MIT License
def compute_scores(self, X, A, I):
        scores = K.dot(X, self.kernel)
        scores = ops.filter_dot(A, scores)
        return scores 
Example #21
Source File: base.py    From spektral with MIT License
def call(self, inputs):
        F = K.int_shape(inputs)[-1]
        # Minkowski (pseudo-)metric: identity except for a -1 in the last diagonal entry
        minkowski_prod_mat = np.eye(F)
        minkowski_prod_mat[-1, -1] = -1.
        minkowski_prod_mat = K.constant(minkowski_prod_mat)
        output = K.dot(inputs, minkowski_prod_mat)
        output = K.dot(output, K.transpose(inputs))
        output = K.clip(output, -10e9, -1.)

        if self.activation is not None:
            output = self.activation(output)

        return output 
Example #22
Source File: base.py    From spektral with MIT License
def call(self, inputs):
        if self.trainable_kernel:
            output = K.dot(K.dot(inputs, self.kernel), K.transpose(inputs))
        else:
            output = K.dot(inputs, K.transpose(inputs))
        if self.activation is not None:
            output = self.activation(output)
        return output 
Example #23
Source File: layers.py    From deepchem with MIT License
def call(self, inputs):
    """Execute this layer on input tensors.

    Parameters
    ----------
    inputs: list
      List of two tensors (X, Xp). X should be of shape (n_test,
      n_feat) and Xp should be of shape (n_support, n_feat) where
      n_test is the size of the test set, n_support that of the support
      set, and n_feat is the number of per-atom features.

    Returns
    -------
    list
      Returns two tensors of same shape as input. Namely the output
      shape will be [(n_test, n_feat), (n_support, n_feat)]
    """
    if len(inputs) != 2:
      raise ValueError("AttnLSTMEmbedding layer must have exactly two parents")
    # x is test set, xp is support set.
    x, xp = inputs

    # Get initializations
    q = self.q_init
    states = self.states_init

    for d in range(self.max_depth):
      # Process using attention
      # Eqn (4), appendix A.1 of Matching Networks paper
      e = _cosine_dist(x + q, xp)
      a = tf.nn.softmax(e)
      r = backend.dot(a, xp)

      # Generate new attention states
      y = backend.concatenate([q, r], axis=1)
      q, states = self.lstm([y] + states)
    return [x + q, xp] 
Example #24
Source File: layers.py    From deepchem with MIT License
def _cosine_dist(x, y):
  """Computes a normalized inner product (cosine-similarity-style score)
  between two tensors, using the global norms of x and y.

  Parameters
  ----------
  x: tf.Tensor
    Input Tensor
  y: tf.Tensor
    Input Tensor
  """
  denom = (backend.sqrt(backend.sum(tf.square(x)) * backend.sum(tf.square(y))) +
           backend.epsilon())
  return backend.dot(x, tf.transpose(y)) / denom 
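A quick numeric check, which also shows that the denominator uses the global norms of x and y rather than per-row norms, so the result is a scaled inner-product matrix rather than exact per-pair cosine similarity:

import tensorflow as tf
from tensorflow.keras import backend

x = tf.constant([[1.0, 0.0]])
y = tf.constant([[1.0, 0.0], [0.0, 1.0]])
denom = (backend.sqrt(backend.sum(tf.square(x)) * backend.sum(tf.square(y))) +
         backend.epsilon())
print((backend.dot(x, tf.transpose(y)) / denom).numpy())  # ~[[0.707, 0.]]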
Example #25
Source File: layers.py    From deepchem with MIT License
def call(self, inputs):
    """Execute this layer on input tensors.

    Parameters
    ----------
    inputs: list
      List of three tensors (x, h_tm1, c_tm1). h_tm1 means "h, t-1".

    Returns
    -------
    list
      Returns h, [h, c]
    """
    x, h_tm1, c_tm1 = inputs

    # Taken from Keras code [citation needed]
    z = backend.dot(x, self.W) + backend.dot(h_tm1, self.U) + self.b

    z0 = z[:, :self.output_dim]
    z1 = z[:, self.output_dim:2 * self.output_dim]
    z2 = z[:, 2 * self.output_dim:3 * self.output_dim]
    z3 = z[:, 3 * self.output_dim:]

    i = self.inner_activation_fn(z0)
    f = self.inner_activation_fn(z1)
    c = f * c_tm1 + i * self.activation_fn(z2)
    o = self.inner_activation_fn(z3)

    h = o * self.activation_fn(c)
    return h, [h, c] 
Example #26
Source File: filterbank.py    From kapre with MIT License
def call(self, x):
        # reshape so that the last axis is freq axis
        if self.image_data_format == 'channels_first':
            x = K.permute_dimensions(x, [0, 1, 3, 2])
        else:
            x = K.permute_dimensions(x, [0, 3, 2, 1])
        output = K.dot(x, self.filterbank)
        # reshape back
        if self.image_data_format == 'channels_first':
            return K.permute_dimensions(output, [0, 1, 3, 2])
        else:
            return K.permute_dimensions(output, [0, 3, 2, 1]) 
Example #27
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, args):

        if not isinstance(args, (list, tuple)):
            args = [args]
        self.cargs = len(args)

        # flatten
        if len(args) == 2:  # input y, m
            # get inputs
            y, y_mask = args 
            a_fact = int(y.get_shape().as_list()[-1] / y_mask.get_shape().as_list()[-1])
            y_mask = K.repeat_elements(y_mask, a_fact, -1)
            y_flat = K.batch_flatten(y)  # N x D
            y_mask_flat = K.batch_flatten(y_mask)  # N x D

            # prepare switching matrix
            W = self.W # d x D

            w_tmp = K.expand_dims(W, 0)  # 1 x d x D
            Wo = K.permute_dimensions(w_tmp, [0, 2, 1]) * K.expand_dims(y_mask_flat, -1)  # N x D x d
            WoT = K.permute_dimensions(Wo, [0, 2, 1])    # N x d x D
            WotWo_inv = tf.linalg.inv(K.batch_dot(WoT, Wo))  # N x d x d
            pre = K.batch_dot(WotWo_inv, WoT) # N x d x D
            res = K.batch_dot(pre, y_flat)  # N x d

            if self.use_bias:
                res += K.expand_dims(self.bias, 0)

        else:
            x_data = args[0]
            shape = K.shape(x_data)

            x_data = K.batch_flatten(x_data)  # N x d

            if self.use_bias:
                x_data -= self.bias

            res = K.dot(x_data, self.W)

            # reshape
            # Here you can mix integers and symbolic elements of `shape`
            pool_shape = tf.stack([shape[0], *self.orig_input_shape])
            res = K.reshape(res, pool_shape)

        return res 
Example #28
Source File: layers.py    From deepchem with MIT License
def angular_symmetry(self, d_cutoff, d, atom_numbers, coordinates):
    """ Angular Symmetry Function """

    max_atoms = self.max_atoms
    embedding = tf.eye(np.max(self.atom_cases) + 1)
    atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)

    Rs = np.linspace(0., self.angular_cutoff, self.angular_length)
    ita = 3 / (Rs[1] - Rs[0])**2
    thetas = np.linspace(0., np.pi, self.angular_length)
    zeta = float(self.angular_length**2)

    ita, zeta, Rs, thetas = np.meshgrid(ita, zeta, Rs, thetas)
    zeta = tf.cast(np.reshape(zeta, (1, 1, 1, 1, -1)), tf.float32)
    ita = tf.cast(np.reshape(ita, (1, 1, 1, 1, -1)), tf.float32)
    Rs = tf.cast(np.reshape(Rs, (1, 1, 1, 1, -1)), tf.float32)
    thetas = tf.cast(np.reshape(thetas, (1, 1, 1, 1, -1)), tf.float32)
    length = zeta.get_shape().as_list()[-1]

    # tf.stack issues again...
    vector_distances = tf.stack([coordinates] * max_atoms, 1) - tf.stack(
        [coordinates] * max_atoms, 2)
    R_ij = tf.stack([d] * max_atoms, axis=3)
    R_ik = tf.stack([d] * max_atoms, axis=2)
    f_R_ij = tf.stack([d_cutoff] * max_atoms, axis=3)
    f_R_ik = tf.stack([d_cutoff] * max_atoms, axis=2)

    # Define angle theta = arccos(R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance))
    vector_mul = tf.reduce_sum(tf.stack([vector_distances] * max_atoms, axis=3) * \
                               tf.stack([vector_distances] * max_atoms, axis=2), axis=4)
    vector_mul = vector_mul * tf.sign(f_R_ij) * tf.sign(f_R_ik)
    theta = tf.acos(tf.math.divide(vector_mul, R_ij * R_ik + 1e-5))

    R_ij = tf.stack([R_ij] * length, axis=4)
    R_ik = tf.stack([R_ik] * length, axis=4)
    f_R_ij = tf.stack([f_R_ij] * length, axis=4)
    f_R_ik = tf.stack([f_R_ik] * length, axis=4)
    theta = tf.stack([theta] * length, axis=4)

    out_tensor = tf.pow((1. + tf.cos(theta - thetas)) / 2., zeta) * \
                 tf.exp(-ita * tf.square((R_ij + R_ik) / 2. - Rs)) * f_R_ij * f_R_ik * 2

    if self.atomic_number_differentiated:
      out_tensors = []
      for id_j, atom_type_j in enumerate(self.atom_cases):
        for atom_type_k in self.atom_cases[id_j:]:
          selected_atoms = tf.stack([atom_numbers_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
                           tf.stack([atom_numbers_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
          selected_atoms = tf.expand_dims(
              tf.expand_dims(selected_atoms, axis=1), axis=4)
          out_tensors.append(
              tf.reduce_sum(out_tensor * selected_atoms, axis=(2, 3)))
      return tf.concat(out_tensors, axis=2)
    else:
      return tf.reduce_sum(out_tensor, axis=(2, 3)) 
Example #29
Source File: layers.py    From deepchem with MIT License
def call(self, inputs):
    """Execute this layer on input tensors.

    Parameters
    ----------
    inputs: list
      List of two tensors (X, Xp). X should be of shape (n_test,
      n_feat) and Xp should be of shape (n_support, n_feat) where
      n_test is the size of the test set, n_support that of the
      support set, and n_feat is the number of per-atom features.

    Returns
    -------
    Returns two tensors of same shape as input. Namely the output
    shape will be [(n_test, n_feat), (n_support, n_feat)]
    """
    if len(inputs) != 2:
      raise ValueError(
          "IterRefLSTMEmbedding layer must have exactly two parents")
    x, xp = inputs

    # Get initializations
    p = self.p_init
    q = self.q_init
    # Rename support
    z = xp
    states = self.support_states_init
    x_states = self.test_states_init

    for d in range(self.max_depth):
      # Process support xp using attention
      e = _cosine_dist(z + q, xp)
      a = tf.nn.softmax(e)
      # Get linear combination of support set
      r = backend.dot(a, xp)

      # Process test x using attention
      x_e = _cosine_dist(x + p, z)
      x_a = tf.nn.softmax(x_e)
      s = backend.dot(x_a, z)

      # Generate new support attention states
      qr = backend.concatenate([q, r], axis=1)
      q, states = self.support_lstm([qr] + states)

      # Generate new test attention states
      ps = backend.concatenate([p, s], axis=1)
      p, x_states = self.test_lstm([ps] + x_states)

      # Redefine
      z = r

    return [x + p, xp + q]