Python keras.backend.batch_dot() Examples

The following are 16 code examples of keras.backend.batch_dot(), collected from open-source projects. The source file and project are listed above each example. You may also want to check out the other functions and classes available in the keras.backend module.
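Before the examples, here is a minimal sketch of batch_dot's shape behavior, assuming the Keras 2.x backend API with a TensorFlow backend; the tensors and variable names below are illustrative and not taken from any of the projects:

import numpy as np
from keras import backend as K

# Per-sample dot products: contract axis 1 of both (5, 3) inputs.
x = K.variable(np.random.rand(5, 3))
y = K.variable(np.random.rand(5, 3))
xy = K.batch_dot(x, y, axes=1)       # shape (5, 1)

# Batched matrix multiplication: contract axis 2 of a with axis 1 of b.
a = K.variable(np.random.rand(5, 2, 3))
b = K.variable(np.random.rand(5, 3, 4))
ab = K.batch_dot(a, b, axes=(2, 1))  # shape (5, 2, 4)

print(K.int_shape(xy), K.int_shape(ab))  # (5, 1) (5, 2, 4)

In both cases the first dimension is treated as the batch dimension and is never contracted; that is what distinguishes batch_dot from dot.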
Example #1
Source File: backend_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_linear_operations(self):
        check_two_tensor_operation('dot', (4, 2), (2, 4), BACKENDS)
        check_two_tensor_operation('dot', (4, 2), (5, 2, 3), BACKENDS)

        check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),
                                   BACKENDS, cntk_two_dynamicity=True, axes=(2, 2))
        check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 3),
                                   BACKENDS, cntk_two_dynamicity=True, axes=(2, 1))
        check_two_tensor_operation('batch_dot', (4, 2), (4, 2, 3),
                                   BACKENDS, cntk_two_dynamicity=True, axes=(1, 1))
        check_two_tensor_operation('batch_dot', (32, 20), (32, 20),
                                   BACKENDS, cntk_two_dynamicity=True, axes=1)
        check_two_tensor_operation('batch_dot', (32, 20), (32, 20),
                                   BACKENDS, cntk_two_dynamicity=True, axes=(1, 1))

        check_single_tensor_operation('transpose', (4, 2), BACKENDS)
        check_single_tensor_operation('reverse', (4, 3, 2), BACKENDS, axes=1)
        check_single_tensor_operation('reverse', (4, 3, 2), [KTH, KTF], axes=(1, 2)) 
Example #2
Source File: capsule.py    From CapsNet with MIT License
def call(self, inputs, **kwargs):
        # (batch_size, 1, input_num_capsule, input_dim_capsule)
        expand_inputs = K.expand_dims(inputs, axis=1)
        # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
        expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
        # (batch_size, num_capsule, input_num_capsule, dim_capsule)
        u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

        if self.num_routing <= 0:
            self.num_routing = 3
        # (batch_size, num_capsule, input_num_capsule)
        b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
        for i in range(self.num_routing):
            # (batch_size, num_capsule, input_num_capsule)
            c = softmax(b, axis=1)
            # (batch_size, num_capsule, dim_capsule)
            s = K.batch_dot(c, u_hat, axes=[2, 2])
            squashed_s = squash(s)
            if i < self.num_routing - 1:
                # (batch_size, num_capsule, input_num_capsule)
                b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
        return squashed_s 
Example #3
Source File: layers.py    From DeepLearn with MIT License
def call(self, x, mask=None):

        e1 = x[0].T
        e2 = x[1].T

        batch_size = K.shape(x[0])[0]
        sim = []
        V_out = K.dot(self.V, K.concatenate([e1, e2], axis=0))

        for i in range(self.k):
            temp = K.batch_dot(K.dot(e1.T, self.W[i, :, :]), e2.T, axes=1)
            sim.append(temp)
        sim = K.reshape(sim, (self.k, batch_size))

        tensor_bi_product = self.activation(V_out + sim)
        tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T

        return tensor_bi_product
Example #4
Source File: capsule.py    From nlp_research with MIT License
def routing(u_hat_vecs, beta_a, iterations, output_capsule_num, i_activations):
    b = keras.backend.zeros_like(u_hat_vecs[:,:,:,0])
    if i_activations is not None:
        i_activations = i_activations[...,tf.newaxis]
    for i in range(iterations):
        if False:
            leak = tf.zeros_like(b, optimize=True)
            leak = tf.reduce_sum(leak, axis=1, keep_dims=True)
            leaky_logits = tf.concat([leak, b], axis=1)
            leaky_routing = tf.nn.softmax(leaky_logits, dim=1)
            c = tf.split(leaky_routing, [1, output_capsule_num], axis=1)[1]
        else:
            c = softmax(b, 1)
#        if i_activations is not None:
#            tf.transpose(tf.transpose(c, perm=[0,2,1]) * i_activations, perm=[0,2,1])
        outputs = squash_v1(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < iterations - 1:
            b = b + K.batch_dot(outputs, u_hat_vecs, [2, 3])
    poses = outputs
    activations = K.sqrt(K.sum(K.square(poses), 2))
    return poses, activations 
Example #5
Source File: capsulelayers.py    From textcaps with MIT License
def call(self, inputs, training=None):
        inputs_expand = K.expand_dims(inputs, 1)
        
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
        
        if self.channels != 0:
            W2 = K.repeat_elements(self.W, int(self.input_num_capsule / self.channels), 1)
        else:
            W2 = self.W

        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, W2, [2, 3]), elems=inputs_tiled)

        b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

        assert self.routings > 0, 'The routings should be > 0.'
        for i in range(self.routings):

            c = tf.nn.softmax(b, dim=1)
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]) + self.B)

            if i < self.routings - 1:
                b += K.batch_dot(outputs, inputs_hat, [2, 3])

        return outputs 
Example #6
Source File: models.py    From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                            self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))

        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
        for i in range(self.routings):
            b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
            c = K.softmax(b)
            c = K.permute_dimensions(c, (0, 2, 1))
            b = K.permute_dimensions(b, (0, 2, 1))
            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

        return outputs 
Example #7
Source File: scale_dot_product_attention.py    From Keras-TextClassification with MIT License
def call(self, inputs, mask=None, **kwargs):
        if isinstance(inputs, list):
            query, key, value = inputs
        else:
            query = key = value = inputs
        if isinstance(mask, list):
            mask = mask[1]
        feature_dim = K.shape(query)[-1]
        e = K.batch_dot(query, key, axes=2) / K.sqrt(K.cast(feature_dim, dtype=K.floatx()))
        e = K.exp(e - K.max(e, axis=-1, keepdims=True))
        if self.history_only:
            query_len, key_len = K.shape(query)[1], K.shape(key)[1]
            indices = K.tile(K.expand_dims(K.arange(key_len), axis=0), [query_len, 1])
            upper = K.expand_dims(K.arange(key_len), axis=-1)
            e *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0)
        if mask is not None:
            e *= K.cast(K.expand_dims(mask, axis=-2), K.floatx())
        a = e / (K.sum(e, axis=-1, keepdims=True) + K.epsilon())
        v = K.batch_dot(a, value)
        if self.return_attention:
            return [v, a]
        return v 
Example #8
Source File: multi_dim_attention.py    From nlp_toolkit with MIT License
def call(self, x, mask=None):
        uit = K.tanh(K.dot(x, self.Ws1))
        ait = K.dot(uit, self.Ws2)
        ait = K.permute_dimensions(ait, (0, 2, 1))
        A = K.softmax(ait, axis=1)
        M = K.batch_dot(A, x)
        if self.punish:
            A_T = K.permute_dimensions(A, (0, 2, 1))
            tile_eye = K.tile(K.eye(self.weight_ws2), [self.batch_size, 1])
            tile_eye = K.reshape(
                tile_eye, shape=[-1, self.weight_ws2, self.weight_ws2])
            AA_T = K.batch_dot(A, A_T) - tile_eye
            P = K.l2_normalize(AA_T, axis=(1, 2))
            return M, P
        else:
            return M 
Example #9
Source File: capsule.py    From Keras-TextClassification with MIT License
def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                            self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
        outputs = None
        for i in range(self.routings):
            b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
            c = K.softmax(b)
            c = K.permute_dimensions(c, (0, 2, 1))
            b = K.permute_dimensions(b, (0, 2, 1))
            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

        return outputs 
Example #10
Source File: ntm.py    From ntm_keras with BSD 3-Clause "New" or "Revised" License
def _get_weight_vector(self, M, w_tm1, k, beta, g, s, gamma):
#        M = tf.Print(M, [M, w_tm1, k], message='get weights beg1: ')
#        M = tf.Print(M, [beta, g, s, gamma], message='get weights beg2: ')
        # Content addressing, see Chapter 3.3.1:
        num = beta * _cosine_distance(M, k)
        w_c = K.softmax(num)  # It turns out that equation (5) is just softmax.
        # Location addressing, see Chapter 3.3.2:
        # Equation 7:
        w_g = (g * w_c) + (1 - g) * w_tm1
        # C_s is the circular convolution:
        # C_w = K.sum((self.C[None, :, :, :] * w_g[:, None, None, :]), axis=3)
        # Equation 8:
        # TODO: Explain
        C_s = K.sum(K.repeat_elements(self.C[None, :, :, :], self.batch_size, axis=0) * s[:, :, None, None], axis=1)
        w_tilda = K.batch_dot(C_s, w_g)
        # Equation 9:
        w_out = _renorm(w_tilda ** gamma)

        return w_out 
Example #11
Source File: QnARecurAtteLatest3Attenenhance.py    From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def bd2(inputs):
    # Contract axis 2 of x with axis 1 of y, batch-wise.
    x, y = inputs
    result = K.batch_dot(x, y, axes=[2, 1])
    return result
Example #12
Source File: QnARecurAtteLatest3Attenenhance.py    From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def bd(inputs):
    # Contract axis 1 of x with axis 1 of y, batch-wise.
    x, y = inputs
    result = K.batch_dot(x, y, axes=[1, 1])
    return result
Example #13
Source File: QnARecurAtteLatest3Attenenhance.py    From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def bd3(inputs):
    # Contract axis 1 of x with axis 2 of y, batch-wise.
    x, y = inputs
    result = K.batch_dot(x, y, axes=[1, 2])
    return result
Example #14
Source File: backend_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_batch_dot_shape(self):
        x_batch = K.ones(shape=(32, 20))
        y_batch = K.ones(shape=(32, 20))
        xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=1)
        assert_allclose(K.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)
        xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=0)
        assert_allclose(K.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)
        # making sure swapping axes when ndim == 2 works
        x_batch = K.ones(shape=(32, 20))
        y_batch = K.ones(shape=(20, 32))
        xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(0, 1))
        assert_allclose(K.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)
        xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(1, 0))
        assert_allclose(K.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05) 
Example #15
Source File: QnARecurAtteLatest3Attenenhance.py    From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def bd4(inputs):
    # Contract axis 2 of x with axis 2 of y, batch-wise.
    x, y = inputs
    result = K.batch_dot(x, y, axes=[2, 2])
    return result
Example #16
Source File: cifar10_cnn_capsule.py    From DeepLearning_Wavelet-LSTM with MIT License
def call(self, inputs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of Capsule.

        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to realize a standard routing.
        """

        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        for i in range(self.routings):
            c = softmax(b, 1)
            o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
            # Theano's batch_dot keeps an extra dimension here; collapse it.
            if K.backend() == 'theano':
                o = K.sum(o, axis=1)
            if i < self.routings - 1:
                b = K.batch_dot(o, hat_inputs, [2, 3])

        return o 