Python tensorflow.keras.backend.batch_dot() Examples

The following are 12 code examples of tensorflow.keras.backend.batch_dot(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the tensorflow.keras.backend module.
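Before the project snippets, here is a minimal sketch (not taken from any of the projects below) of what K.batch_dot computes: it contracts one axis of each input per batch element, so a (B, n, m) tensor against a (B, m, p) tensor yields (B, n, p), and the axes argument selects which dimensions are contracted.

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.random.rand(4, 2, 3))   # batch of 4 matrices, shape (4, 2, 3)
y = K.constant(np.random.rand(4, 3, 5))   # batch of 4 matrices, shape (4, 3, 5)
z = K.batch_dot(x, y)                     # default axes contract the last axis of x
                                          # with the second-to-last axis of y
print(K.int_shape(z))                     # (4, 2, 5)

a = K.constant(np.random.rand(4, 7))
b = K.constant(np.random.rand(4, 7))
print(K.int_shape(K.batch_dot(a, b, axes=1)))  # (4, 1): one dot product per sample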
Example #1
Source File: global_pool.py    From spektral with MIT License
def call(self, inputs):
        if self.data_mode == 'disjoint':
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        attn_coeff = K.dot(X, self.attn_kernel)
        attn_coeff = K.squeeze(attn_coeff, -1)
        attn_coeff = K.softmax(attn_coeff)
        if self.data_mode == 'single':
            output = K.dot(attn_coeff[None, ...], X)
        elif self.data_mode == 'batch':
            output = K.batch_dot(attn_coeff, X)
        else:
            output = attn_coeff[:, None] * X
            output = tf.math.segment_sum(output, I)

        return output 
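
In the 'batch' branch above, attn_coeff has shape (batch, nodes) and X has shape (batch, nodes, features), so K.batch_dot contracts the node axis and returns one attention-pooled feature vector per graph. A hedged shape check with assumed sizes (the dimensions are illustrative, not taken from spektral):

import numpy as np
from tensorflow.keras import backend as K

attn_coeff = K.softmax(K.constant(np.random.rand(8, 10)))  # (batch=8, nodes=10)
X = K.constant(np.random.rand(8, 10, 32))                  # (batch=8, nodes=10, features=32)
pooled = K.batch_dot(attn_coeff, X)                        # contracts the node axis
print(K.int_shape(pooled))                                 # (8, 32)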
Example #2
Source File: global_metrics.py    From ICCV2019-Horde with MIT License
def _build_tf_cosine_similarity(max_rank=0, offset=1, eps=1e-12):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    sim = K.batch_dot(batch_representations, all_representations_T)  # 1 x n x N
    sim = K.squeeze(sim, axis=0)  # n x N
    sim /= tf.linalg.norm(tf_batch_query, axis=1, keepdims=True) + eps
    sim /= tf.linalg.norm(tf_db, axis=0, keepdims=True) + eps

    if max_rank > 0:  # computing r@K or mAP@K
        index_ranking = tf.nn.top_k(sim, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(sim, axis=-1, direction='DESCENDING', stable=True)

    top_k = index_ranking[:, offset:]
    tf_ranking = tf.gather(tf_labels, top_k)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking 
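
The expand_dims/batch_dot/squeeze sequence above computes the full n x N matrix of dot products between the query batch (n x D) and the database stored as D x N; dividing by the two norms then turns it into cosine similarity. A hedged sketch (shapes assumed, not part of ICCV2019-Horde) showing the batch_dot form agrees with a plain matrix product:

import numpy as np
from tensorflow.keras import backend as K

Q = K.constant(np.random.rand(5, 16))     # n=5 queries of dimension D=16
DB = K.constant(np.random.rand(16, 100))  # database stored as D x N = 16 x 100

sim_batch = K.squeeze(K.batch_dot(K.expand_dims(Q, 0), K.expand_dims(DB, 0)), axis=0)
sim_dot = K.dot(Q, DB)                    # same n x N matrix of dot products
print(np.allclose(K.eval(sim_batch), K.eval(sim_dot)))  # True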
Example #3
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x
        last_state = K.expand_dims(inp_b[:, -1, :], 1)
        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_last = last_state * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_last = K.l2_normalize(outp_last, -1)
            outp = K.batch_dot(outp_a, outp_last, axes=[2, 2])
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #4
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x
        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_b = inp_b * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_b = K.l2_normalize(outp_b, -1)
            outp = K.batch_dot(outp_a, outp_b, axes=[2, 2])
            outp = K.max(outp, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #5
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[1, 1])
        alpha = K.l2_normalize(alpha, 1)
        hmean = K.batch_dot(outp_b, alpha, axes=[2, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmean = hmean * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmean = K.l2_normalize(outp_hmean, -1)
            outp = K.batch_dot(outp_hmean, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #6
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, x):
        x_orig = x

        # x reshape
        this_bs_int = K.shape(x)[0]
        this_bs = tf.cast(this_bs_int, 'float32')  # this batch size
        prev_count = self.count
        x = K.batch_flatten(x)  # B x N

        # update mean
        new_mean, new_count = _mean_update(self.mean, self.count, x, self.cap)        

        # new C update. Should be B x N x N
        x = K.expand_dims(x, -1)
        C_delta = K.batch_dot(x, K.permute_dimensions(x, [0, 2, 1]))

        # update cov
        prev_cap = K.minimum(prev_count, self.cap)
        C = self.cov * (prev_cap - 1) + K.sum(C_delta, 0)
        new_cov = C / (prev_cap + this_bs - 1)

        # updates
        updates = [(self.count, new_count), (self.mean, new_mean), (self.cov, new_cov)]
        self.add_update(updates, x_orig)

        # prep for broadcasting :(
        p = tf.concat((K.reshape(this_bs_int, (1,)), K.shape(self.cov)), 0)
        z = tf.ones(p)

        return K.minimum(1., new_count/self.cap) * (z * K.expand_dims(new_cov, 0)) 
Example #7
Source File: iic-13.5.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def mi_loss(self, y_true, y_pred):
        """Mutual information loss computed from the joint
           distribution matrix and the marginals

        Arguments:
            y_true (tensor): Not used since this is
                unsupervised learning
            y_pred (tensor): stack of softmax predictions for
                the Siamese latent vectors (Z and Zbar)
        """
        size = self.args.batch_size
        n_labels = y_pred.shape[-1]
        # lower half is Z
        Z = y_pred[0: size, :]
        Z = K.expand_dims(Z, axis=2)
        # upper half is Zbar
        Zbar = y_pred[size: y_pred.shape[0], :]
        Zbar = K.expand_dims(Zbar, axis=1)
        # compute joint distribution (Eq 10.3.2 & .3)
        P = K.batch_dot(Z, Zbar)
        P = K.sum(P, axis=0)
        # enforce symmetric joint distribution (Eq 10.3.4)
        P = (P + K.transpose(P)) / 2.0
        # normalization of total probability to 1.0
        P = P / K.sum(P)
        # marginal distributions (Eq 10.3.5 & .6)
        Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
        Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
        Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
        Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
        P = K.clip(P, K.epsilon(), np.finfo(float).max)
        Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
        Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
        # negative MI loss (Eq 10.3.7)
        neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
        # each head contributes 1/n_heads to the total loss
        return neg_mi/self.args.heads 
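
In the joint-distribution step above, Z has shape (batch, n_labels, 1) and Zbar has shape (batch, 1, n_labels), so K.batch_dot produces a (batch, n_labels, n_labels) stack of outer products; summing over the batch gives the unnormalised joint matrix P. A hedged shape check with assumed sizes:

import numpy as np
from tensorflow.keras import backend as K

B, C = 4, 10                                                               # assumed batch size and label count
Z = K.expand_dims(K.softmax(K.constant(np.random.rand(B, C))), axis=2)     # (B, C, 1)
Zbar = K.expand_dims(K.softmax(K.constant(np.random.rand(B, C))), axis=1)  # (B, 1, C)
P = K.sum(K.batch_dot(Z, Zbar), axis=0)                                    # outer products summed over the batch
print(K.int_shape(P))                                                      # (10, 10)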
Example #8
Source File: attention.py    From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, mask=None):
        """
        convert to query, key, value vectors, shaped [batch_size*num_head, time_step, embed_dim]
        """
        multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q),
                                                 self.num_heads, axis=2), axis=0)
        multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k),
                                               self.num_heads, axis=2), axis=0)
        multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v),
                                                 self.num_heads, axis=2), axis=0)

        """scaled dot product"""
        scaled = K.int_shape(inputs)[-1] ** -0.5
        attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled
        # apply mask before normalization (softmax)
        if mask is not None:
            multihead_mask = K.tile(mask, [self.num_heads, 1])
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)
        # normalization
        attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
        # apply attention
        attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
        attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
        attend = K.dot(attend, self.w_final)

        if self.residual:
            attend = attend + inputs
        if self.normalize:
            mean = K.mean(attend, axis=-1, keepdims=True)
            std = K.std(attend, axis=-1, keepdims=True)  # standard deviation for layer normalization
            attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta

        return attend 
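
The two batch_dot calls above carry the scaled dot-product step: axes=2 contracts the per-head feature axis of query and key into a (batch*heads, T, T) score matrix, and axes=(2, 1) applies the normalised scores to the values. A hedged shape sketch with assumed head count, sequence length, and head dimension:

import numpy as np
from tensorflow.keras import backend as K

BH, T, d = 6, 12, 16                            # batch*heads, time steps, per-head dim (assumed)
q = K.constant(np.random.rand(BH, T, d))
k = K.constant(np.random.rand(BH, T, d))
v = K.constant(np.random.rand(BH, T, d))

scores = K.batch_dot(q, k, axes=2) * d ** -0.5  # (6, 12, 12) attention logits
weights = K.softmax(scores)                     # one possible normalisation choice
out = K.batch_dot(weights, v, axes=(2, 1))      # (6, 12, 16)
print(K.int_shape(scores), K.int_shape(out))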
Example #9
Source File: global_metrics.py    From ICCV2019-Horde with MIT License
def _build_tf_l2_similarity(max_rank=0, offset=1):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    dist = -2. * K.batch_dot(batch_representations, all_representations_T)  # 1 x n x N
    dist = K.squeeze(dist, axis=0)  # n x N
    dist += K.sum(tf_batch_query * tf_batch_query, axis=1, keepdims=True)
    dist += K.sum(tf_db * tf_db, axis=0, keepdims=True)

    if max_rank > 0:  # computing r@K or mAP@K
        # top_k finds the k greatest entries and we want the lowest. Note that distance with itself will be last ranked
        dist = -dist
        index_ranking = tf.nn.top_k(dist, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(dist, axis=-1, direction='ASCENDING', stable=True)

    index_ranking = index_ranking[:, offset:]

    tf_ranking = tf.gather(tf_labels, index_ranking)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking 
Example #10
Source File: bilstm_siamese_network.py    From DeepPavlov with Apache License 2.0
def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
        emb_c, emb_r = inputs
        bs = K.shape(emb_c)[0]
        embeddings = K.concatenate([emb_c, emb_r], 0)
        dot_product = K.dot(embeddings, K.transpose(embeddings))
        square_norm = K.batch_dot(embeddings, embeddings, axes=1)
        distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
        distances = distances[0:bs, bs:bs+bs]
        distances = K.clip(distances, 0.0, None)
        mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
        distances = distances + mask * 1e-16
        distances = K.sqrt(distances)
        distances = distances * (1.0 - mask)
        return distances 
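
Here K.batch_dot with axes=1 on the 2D embedding matrix returns the per-row squared norms as a column, and the pairwise squared distances follow from the identity ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2. A hedged sketch (embedding sizes assumed) verifying that identity:

import numpy as np
from tensorflow.keras import backend as K

emb = K.constant(np.random.rand(6, 8))                                 # N=6 vectors of dimension 8
dot_product = K.dot(emb, K.transpose(emb))                             # (6, 6) Gram matrix
square_norm = K.batch_dot(emb, emb, axes=1)                            # (6, 1) squared norms
dist_sq = K.transpose(square_norm) - 2.0 * dot_product + square_norm   # (6, 6) squared distances

ref = np.sum((K.eval(emb)[:, None, :] - K.eval(emb)[None, :, :]) ** 2, axis=-1)
print(np.allclose(K.eval(dist_sq), ref))                               # True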
Example #11
Source File: network.py    From DeepPavlov with Apache License 2.0
def biaffine_layer(deps: tf.Tensor, heads: tf.Tensor, deps_dim: int,
                   heads_dim: int, output_dim: int, name: str = "biaffine_layer") -> tf.Tensor:
    """Implements a biaffine layer from [Dozat, Manning, 2016].

    Args:
        deps: the 3D-tensor of dependency states,
        heads: the 3D-tensor of head states,
        deps_dim: the dimension of dependency states,
        heads_dim: the dimension of head_states,
        output_dim: the output dimension
        name: the name of a layer

    Returns:
        `answer` the output 3D-tensor

    """
    input_shape = [kb.shape(deps)[i] for i in range(tf.keras.backend.ndim(deps))]
    first_input = tf.reshape(deps, [-1, deps_dim])  # first_input.shape = (B*L, D1)
    second_input = tf.reshape(heads, [-1, heads_dim])  # second_input.shape = (B*L, D2)
    with tf.variable_scope(name):
        kernel_shape = (deps_dim, heads_dim * output_dim)
        kernel = tf.get_variable('kernel', shape=kernel_shape, initializer=xavier_initializer())
        first = tf.matmul(first_input, kernel)  # (B*L, D2*H)
        first = tf.reshape(first, [-1, heads_dim, output_dim])  # (B*L, D2, H)
        answer = kb.batch_dot(first, second_input, axes=[1, 1])  # (B*L, H)
        first_bias = tf.get_variable('first_bias', shape=(deps_dim, output_dim),
                                     initializer=xavier_initializer())
        answer += tf.matmul(first_input, first_bias)
        second_bias = tf.get_variable('second_bias', shape=(heads_dim, output_dim),
                                      initializer=xavier_initializer())
        answer += tf.matmul(second_input, second_bias)
        label_bias = tf.get_variable('label_bias', shape=(output_dim,),
                                     initializer=xavier_initializer())
        answer = kb.bias_add(answer, label_bias)
        answer = tf.reshape(answer, input_shape[:-1] + [output_dim])  # (B, L, H)
    return answer 
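
In the batch_dot above, first has shape (B*L, D2, H) and second_input has shape (B*L, D2); axes=[1, 1] contracts the D2 axis of both, leaving one H-dimensional output per token. A hedged shape check with assumed sizes:

import numpy as np
from tensorflow.keras import backend as K

BL, D2, H = 6, 5, 3                                      # assumed sizes
first = K.constant(np.random.rand(BL, D2, H))
second_input = K.constant(np.random.rand(BL, D2))
answer = K.batch_dot(first, second_input, axes=[1, 1])   # contracts the D2 axis
print(K.int_shape(answer))                               # (6, 3)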
Example #12
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, args):

        if not isinstance(args, (list, tuple)):
            args = [args]
        self.cargs = len(args)

        # flatten
        if len(args) == 2:  # input y, m
            # get inputs
            y, y_mask = args 
            a_fact = int(y.get_shape().as_list()[-1] / y_mask.get_shape().as_list()[-1])
            y_mask = K.repeat_elements(y_mask, a_fact, -1)
            y_flat = K.batch_flatten(y)  # N x D
            y_mask_flat = K.batch_flatten(y_mask)  # N x D

            # prepare switching matrix
            W = self.W # d x D

            w_tmp = K.expand_dims(W, 0)  # 1 x d x D
            Wo = K.permute_dimensions(w_tmp, [0, 2, 1]) * K.expand_dims(y_mask_flat, -1)  # N x D x d
            WoT = K.permute_dimensions(Wo, [0, 2, 1])    # N x d x D
            WotWo_inv = tf.matrix_inverse(K.batch_dot(WoT, Wo))  # N x d x d
            pre = K.batch_dot(WotWo_inv, WoT) # N x d x D
            res = K.batch_dot(pre, y_flat)  # N x d

            if self.use_bias:
                res += K.expand_dims(self.bias, 0)

        else:
            x_data = args[0]
            shape = K.shape(x_data)

            x_data = K.batch_flatten(x_data)  # N x d

            if self.use_bias:
                x_data -= self.bias

            res = K.dot(x_data, self.W)

            # reshape
            # Here you can mix integers and symbolic elements of `shape`
            pool_shape = tf.stack([shape[0], *self.orig_input_shape])
            res = K.reshape(res, pool_shape)

        return res
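
In the masked branch above, batch_dot acts as a batched matrix multiply: WoT is N x d x D and Wo is N x D x d, so their product is a stack of N small d x d Gram matrices, and multiplying an N x d x D tensor with the N x D flattened input (as in the pre/res step) yields one d-vector per sample. A hedged shape check with assumed sizes:

import numpy as np
from tensorflow.keras import backend as K

N, d, D = 3, 4, 9                                # assumed sizes
WoT = K.constant(np.random.rand(N, d, D))
Wo = K.permute_dimensions(WoT, [0, 2, 1])        # N x D x d
y_flat = K.constant(np.random.rand(N, D))

gram = K.batch_dot(WoT, Wo)                      # (3, 4, 4): one Gram matrix per sample
res = K.batch_dot(WoT, y_flat)                   # (3, 4): contracts the D axis
print(K.int_shape(gram), K.int_shape(res))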