Python keras.backend.shape() Examples

The following are 30 code examples showing how to use keras.backend.shape(). They are extracted from open source projects; the project, author, file, and license noted above each example identify the original source.

You may also want to check out all available functions and classes of the module keras.backend.
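
For context, keras.backend.shape() returns the dynamic shape of a tensor as a tensor (its values are only known at run time), while keras.backend.int_shape() returns the static shape as a tuple of Python ints, with None for unknown dimensions. A minimal sketch of the difference (an illustration, not taken from the projects below):

from keras import backend as K
from keras.layers import Input

x = Input(shape=(None, 128))  # batch and time dimensions are unknown statically
print(K.int_shape(x))         # (None, None, 128) -- static shape, a Python tuple
dynamic = K.shape(x)          # a tensor; usable inside other backend ops
batch_size, timesteps = dynamic[0], dynamic[1]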

Example 1
Project: models   Author: kipoi   File: model.py    License: MIT License
def predict_on_batch(self, inputs):
            if inputs.shape == (2,):
                inputs = inputs[np.newaxis, :]
            # Encode
            max_len = len(max(inputs, key=len))
            one_hot_ref = self.encode(inputs[:,0])
            one_hot_alt = self.encode(inputs[:,1])
            # Construct dummy library indicator
            indicator = np.zeros((inputs.shape[0],2))
            indicator[:,1] = 1
            # Compute fold change for all three frames
            fc_changes = []
            for shift in range(3):
                if shift > 0:
                    shifter = np.zeros((one_hot_ref.shape[0],1,4))
                    one_hot_ref = np.concatenate([one_hot_ref, shifter], axis=1)
                    one_hot_alt = np.concatenate([one_hot_alt, shifter], axis=1)
                pred_ref = self.model.predict_on_batch([one_hot_ref, indicator]).reshape(-1)
                pred_variant = self.model.predict_on_batch([one_hot_alt, indicator]).reshape(-1)
                fc_changes.append(np.log2(pred_variant/pred_ref))
            # Return
            return {"mrl_fold_change":fc_changes[0], 
                    "shift_1":fc_changes[1],
                    "shift_2":fc_changes[2]} 
Example 2
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def build(self, input_shape):
        self._validate_input_shape(input_shape)

        self.input_spec = InputSpec(shape=input_shape)
        
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True
            
        input_dim = input_shape[-1]

        if self.layer.return_sequences:
            output_dim = self.layer.compute_output_shape(input_shape)[0][-1]
        else:
            output_dim = self.layer.compute_output_shape(input_shape)[-1]
      
        self._W1 = self.add_weight(shape=(input_dim, input_dim), name="{}_W1".format(self.name), initializer=self.weight_initializer)
        self._W2 = self.add_weight(shape=(output_dim, input_dim), name="{}_W2".format(self.name), initializer=self.weight_initializer)
        self._W3 = self.add_weight(shape=(2*input_dim, input_dim), name="{}_W3".format(self.name), initializer=self.weight_initializer)
        self._b2 = self.add_weight(shape=(input_dim,), name="{}_b2".format(self.name), initializer=self.weight_initializer)
        self._b3 = self.add_weight(shape=(input_dim,), name="{}_b3".format(self.name), initializer=self.weight_initializer)
        self._V = self.add_weight(shape=(input_dim,1), name="{}_V".format(self.name), initializer=self.weight_initializer)
        
        super(AttentionRNNWrapper, self).build() 
Example 3
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def step(self, x, states):   
        h = states[0]
        # states[1] necessary?

        # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
        total_x_prod = states[-1]
        # comes from the constants (equals the input sequence)
        X = states[-2]
        
        # expand dims to add the vector which is only valid for this time step
        # to total_x_prod which is valid for all time steps
        hw = K.expand_dims(K.dot(h, self._W2), 1)
        additive_atn = total_x_prod + hw
        attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
        x_weighted = K.sum(attention * X, [1])

        x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
        
        h, new_states = self.layer.cell.call(x, states[:-2])
        
        return h, new_states 
Example 4
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def step(self, x, states):  
        h = states[0]
        # states[1] necessary?
        
        # comes from the constants
        X_static = states[-2]
        # equals K.dot(static_x, self._W1) + self._b2 with X.shape=[bs, L, static_input_dim]
        total_x_static_prod = states[-1]

        # expand dims to add the vector which is only valid for this time step
        # to total_x_prod which is valid for all time steps
        hw = K.expand_dims(K.dot(h, self._W2), 1)
        additive_atn = total_x_static_prod + hw
        attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
        static_x_weighted = K.sum(attention * X_static, [1])
        
        x = K.dot(K.concatenate([x, static_x_weighted], 1), self._W3) + self._b3

        h, new_states = self.layer.cell.call(x, states[:-2])
        
        # append attention to the states to "smuggle" it out of the RNN wrapper
        attention = K.squeeze(attention, -1)
        h = K.concatenate([h, attention])

        return h, new_states 
Example 5
Project: steppy-toolkit   Author: minerva-ml   File: contrib.py    License: MIT License
def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result 
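
The "max trick" referenced in the comments above is the standard numerically stable softmax: subtracting the per-row maximum before exponentiating leaves the normalized weights unchanged but prevents overflow. A quick NumPy illustration (not part of the project code):

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])  # naive np.exp(logits) overflows to inf
stable = np.exp(logits - logits.max())       # [e^-2, e^-1, e^0]
weights = stable / stable.sum()              # valid softmax: approx. [0.090, 0.245, 0.665]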
Example 6
Project: CapsNet   Author: l11x0m7   File: capsule.py    License: MIT License
def call(self, inputs, **kwargs):
        # (batch_size, 1, input_num_capsule, input_dim_capsule)
        expand_inputs = K.expand_dims(inputs, axis=1)
        # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
        expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
        # (batch_size, num_capsule, input_num_capsule, dim_capsule)
        u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

        if self.num_routing <= 0:
            self.num_routing = 3
        # (batch_size, num_capsule, input_num_capsule)
        b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
        for i in range(self.num_routing):
            # (batch_size, num_capsule, input_num_capsule)
            c = softmax(b, axis=1)
            # (batch_size, num_capsule, dim_capsule)
            s = K.batch_dot(c, u_hat, axes=[2, 2])
            squashed_s = squash(s)
            if i < self.num_routing - 1:
                # (batch_size, num_capsule, input_num_capsule)
                b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
        return squashed_s 
Example 7
def call(self,x,mask=None):
        conv_input,theta = x
        s = theta.shape
        theta = T.reshape(theta,[-1,s[2]])
        m = K.not_equal(conv_input,0.)

        #### For translation
        trans = _trans(theta)
        output = _transform_trans(trans, conv_input)
        output = output * K.cast(m,K.floatx())

        ### For rotation
        M = _fusion(theta)
        output = _transform_rot(M,output)

        return output 
Example 8
def call(self,x,training=None):
        deta1 = 0.3
        deta2 = 0.3
        deta3 = 0.3
        seed = np.random.randint(1, 10e6)
        rng = RandomStreams(seed=seed)
        theta1 = rng.uniform(size=(x.shape[0],1),low=-deta1,high=deta1,dtype='float32')
        theta2 = rng.uniform(size=(x.shape[0],1),low=-deta2,high=deta2,dtype='float32')
        theta3 = rng.uniform(size=(x.shape[0],1),low=-deta3,high=deta3,dtype='float32')
        theta = K.concatenate([theta1,theta2,theta3],axis=-1)
        theta = K.tile(theta,x.shape[1])
        theta = theta.reshape((x.shape[0], x.shape[1], 3))

        theta = theta.reshape((theta.shape[0]*theta.shape[1], theta.shape[2]))
        M = _fusion(theta)
        output = _transform_rot(M, x)

        return K.in_train_phase(output, x, training=training) 
Example 9
Project: fancy-cnn   Author: textclf   File: timedistributed.py    License: MIT License
def get_output(self, train=False):
        def format_shape(shape):
            if K._BACKEND == 'tensorflow':
                def trf(x):
                    try:
                        return int(x)
                    except TypeError:
                        return x

                return list(map(trf, shape))
            return shape

        X = self.get_input(train)

        in_shape = format_shape(K.shape(X))
        batch_flatten_len = K.prod(in_shape[:2])
        cast_in_shape = (batch_flatten_len, ) + tuple(in_shape[i] for i in range(2, K.ndim(X)))
        
        pre_outs = self.layer(K.reshape(X, cast_in_shape))
        
        out_shape = format_shape(K.shape(pre_outs))
        cast_out_shape = (in_shape[0], in_shape[1]) + tuple(out_shape[i] for i in range(1, K.ndim(pre_outs)))
        
        outputs = K.reshape(pre_outs, cast_out_shape)
        return outputs 
Example 10
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def clip_boxes_graph(boxes, window):
    """
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    # Split
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clip
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped 
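
To make the clipping concrete, here is a hypothetical call with illustrative values (assuming TensorFlow with eager execution, so the tensors evaluate directly):

import tensorflow as tf

boxes = tf.constant([[-10., 5., 50., 120.],
                     [20., 30., 80., 90.]])
window = tf.constant([0., 0., 100., 100.])
clipped = clip_boxes_graph(boxes, window)
# the first box is clipped to [0, 5, 50, 100]; the second already fits and is unchanged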
Example 11
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.
    It wraps the RPN graph so it can be used multiple times with shared
    weights.
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.
    Returns a Keras Model object. The model outputs, when called, are:
    rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
    rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                applied to anchors.
    """
    input_feature_map = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
    return KM.Model([input_feature_map], outputs, name="rpn_model")


############################################################
#  Feature Pyramid Network Heads
############################################################ 
Example 12
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def get_anchors(self, image_shape):
        """Returns anchor pyramid for the given image size."""
        backbone_shapes = compute_backbone_shapes(self.config, image_shape)
        # Cache anchors and reuse if image shape is the same
        if not hasattr(self, "_anchor_cache"):
            self._anchor_cache = {}
        if not tuple(image_shape) in self._anchor_cache:
            # Generate Anchors
            a = utils.generate_pyramid_anchors(
                self.config.RPN_ANCHOR_SCALES,
                self.config.RPN_ANCHOR_RATIOS,
                backbone_shapes,
                self.config.BACKBONE_STRIDES,
                self.config.RPN_ANCHOR_STRIDE)
            # Keep a copy of the latest anchors in pixel coordinates because
            # it's used in inspect_model notebooks.
            # TODO: Remove this after the notebook are refactored to not use it
            self.anchors = a
            # Normalize coordinates
            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
        return self._anchor_cache[tuple(image_shape)] 
Example 13
Project: keras-yolo3   Author: bing0037   File: model.py    License: MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 14
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    License: MIT License
def sampling(args: tuple):
    """
    Reparameterization trick by sampling z from unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
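
In a VAE this function is typically wired into the model with a Lambda layer so the sampling happens inside the computation graph; a minimal sketch, assuming encoder output tensors named z_mean and z_log_var:

from keras.layers import Lambda

# z_mean and z_log_var are the encoder's output tensors (hypothetical names)
z = Lambda(sampling, name='z')([z_mean, z_log_var])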
Example 15
Project: multi-object-tracking   Author: jguoaj   File: model.py    License: GNU General Public License v3.0
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 16
Project: vision-web-service   Author: sherlockchou86   File: model.py    License: MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 17
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    License: MIT License
def build(self,input_shape):
        
        self.W = self.add_weight(name='w',shape=(self.k, self.d, self.d),
                                      initializer='glorot_uniform',trainable=True)
                                      #initializer='ones',trainable=False)
        
        self.V = self.add_weight(name='v',shape=(self.k, self.d*2),
                                      initializer='glorot_uniform',trainable=True)
                                    #initializer='ones',trainable=False)
                                  
        #self.b = self.add_weight(name='b',shape=(self.k,1),
        #                              initializer='glorot_uniform',trainable=True)
#                                    initializer='ones',trainable=False)
        
        self.U = self.add_weight(name='u',shape=(self.k,1),
#                                      initializer='ones',trainable=False)
                initializer='glorot_uniform',trainable=True)
                                  
        super(ntn, self).build(input_shape) 
Example 18
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    License: MIT License
def call(self, x, mask=None):

        e1 = x[0].T
        e2 = x[1].T

        batch_size = K.shape(x[0])[0]
        sim = []
        V_out = K.dot(self.V, K.concatenate([e1, e2], axis=0))

        for i in range(self.k):
            temp = K.batch_dot(K.dot(e1.T, self.W[i, :, :]), e2.T, axes=1)
            sim.append(temp)
        sim = K.reshape(sim, (self.k, batch_size))

        tensor_bi_product = self.activation(V_out + sim)
        tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T

        return tensor_bi_product 
Example 19
Project: PanopticSegmentation   Author: dmechea   File: model.py    License: MIT License
def clip_boxes_graph(boxes, window):
    """
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    # Split
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clip
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped 
Example 20
Project: PanopticSegmentation   Author: dmechea   File: model.py    License: MIT License
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.
    It wraps the RPN graph so it can be used multiple times with shared
    weights.

    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.

    Returns a Keras Model object. The model outputs, when called, are:
    rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
    rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                applied to anchors.
    """
    input_feature_map = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
    return KM.Model([input_feature_map], outputs, name="rpn_model")


############################################################
#  Feature Pyramid Network Heads
############################################################ 
Example 21
Project: PanopticSegmentation   Author: dmechea   File: model.py    License: MIT License
def get_anchors(self, image_shape):
        """Returns anchor pyramid for the given image size."""
        backbone_shapes = compute_backbone_shapes(self.config, image_shape)
        # Cache anchors and reuse if image shape is the same
        if not hasattr(self, "_anchor_cache"):
            self._anchor_cache = {}
        if not tuple(image_shape) in self._anchor_cache:
            # Generate Anchors
            a = utils.generate_pyramid_anchors(
                self.config.RPN_ANCHOR_SCALES,
                self.config.RPN_ANCHOR_RATIOS,
                backbone_shapes,
                self.config.BACKBONE_STRIDES,
                self.config.RPN_ANCHOR_STRIDE)
            # Keep a copy of the latest anchors in pixel coordinates because
            # it's used in inspect_model notebooks.
            # TODO: Remove this after the notebook are refactored to not use it
            self.anchors = a
            # Normalize coordinates
            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
        return self._anchor_cache[tuple(image_shape)] 
Example 22
Project: models   Author: kipoi   File: model.py    License: MIT License
def call(self, x):
        shape = K.shape(x)
        x = K.reverse(x, axes=1)  # reverse so that the reading frame is anchored at a fixed point (the sequence end)
        frame_1 = tf.gather(x, K.arange(start=0, stop=shape[1], step=3), axis=1)
        frame_2 = tf.gather(x, K.arange(start=1, stop=shape[1], step=3), axis=1)
        frame_3 = tf.gather(x, K.arange(start=2, stop=shape[1], step=3), axis=1)
        return [frame_1, frame_2, frame_3] 
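
The gather pattern above splits the (reversed) sequence into three reading frames. A NumPy analogue on a 1-D array makes the indexing easy to see (an illustration only; the layer itself gathers along axis 1 of a batched tensor):

import numpy as np

x = np.arange(9)[::-1]  # reversed sequence: [8, 7, 6, 5, 4, 3, 2, 1, 0]
frame_1, frame_2, frame_3 = x[0::3], x[1::3], x[2::3]
# frame_1 = [8, 5, 2], frame_2 = [7, 4, 1], frame_3 = [6, 3, 0]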
Example 23
Project: keras-utility-layer-collection   Author: zimmerrol   File: image.py    License: MIT License
def build(self, input_shape):
        filter_sizes = [2 * f - f % 2 for f in self._upscaling_factors]

        n_channels = input_shape[-1]

        self._W = self.add_weight(
            shape=(filter_sizes[0], filter_sizes[1], n_channels, n_channels),
            name="W",
            initializer=DiscreteResize2D._bilinear_upsampling_weights,
            trainable=False
        ) 
Example 24
Project: keras-utility-layer-collection   Author: zimmerrol   File: image.py    License: MIT License
def call(self, x):
        shape = self.compute_output_shape(x.shape.as_list())
        batch_size = K.shape(x)[0]
        output_shape = (batch_size, *shape)
      
        return K.conv2d_transpose(x, self._W, output_shape=output_shape, strides=tuple(self._upscaling_factors), padding="same") 
Example 25
Project: keras-utility-layer-collection   Author: zimmerrol   File: image.py    License: MIT License
def call(self, x):
        self._validate_input_shape(x.shape)
        output_shape = self.compute_output_shape(x.shape.as_list())

        upscaled_x = tf.image.resize_images(x, self._output_shape, method=self._method)

        return upscaled_x 
Example 26
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def _validate_input_shape(self, input_shape):
        if len(input_shape) != 3:
            raise ValueError("Layer received an input shape {0} but expected three inputs (Q, V, K).".format(input_shape))
        else:
            if input_shape[0][0] != input_shape[1][0] or input_shape[1][0] != input_shape[2][0]:
                raise ValueError("All three inputs (Q, V, K) have to have the same batch size; received batch sizes: {0}, {1}, {2}".format(input_shape[0][0], input_shape[1][0], input_shape[2][0]))
            if input_shape[0][1] != input_shape[1][1] or input_shape[1][1] != input_shape[2][1]:
                raise ValueError("All three inputs (Q, V, K) have to have the same length; received lengths: {0}, {1}, {2}".format(input_shape[0][0], input_shape[1][0], input_shape[2][0]))
            if input_shape[0][2] != input_shape[1][2]:
                raise ValueError("Input shapes of Q {0} and V {1} do not match.".format(input_shape[0], input_shape[1])) 
Example 27
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def call(self, x, mask=None):
        q, k, v = x
        d_k = q.shape.as_list()[2]

        # in pure tensorflow:
        # weights = tf.matmul(x_batch, tf.transpose(y_batch, perm=[0, 2, 1]))
        # normalized_weights = tf.nn.softmax(weights/scaling)
        # output = tf.matmul(normalized_weights, x_batch)
        
        weights = K.batch_dot(q, k, axes=[2, 2])

        if mask is not None:
            # add mask weights
            if isinstance(mask, (list, tuple)):
                if len(mask) > 1:
                    raise ValueError("mask can only be a Tensor or a list of length 1 containing a tensor.")

                mask = mask[0]

            weights += -1e10*(1-mask)

        normalized_weights = K.softmax(weights / np.sqrt(d_k))
        output = K.batch_dot(normalized_weights, v)
        
        if self._return_attention:
            return [output, normalized_weights]
        else:
            return output 
Example 28
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def __call__(self, x, mask=None):
        if isinstance(x, (list, tuple)):
            self.build([it.shape for it in x])
        else:
            self.build(x.shape)

        q, k, v = x
        
        outputs = []
        attentions = []
        for i in range(self._h):
            qi = self._q_layers[i](q)
            ki = self._k_layers[i](k)
            vi = self._v_layers[i](v)
            
            if self._return_attention:
                output, attention = self._sdp_layer([qi, ki, vi], mask=mask)
                outputs.append(output)
                attentions.append(attention)
            else:
                output = self._sdp_layer([qi, ki, vi], mask=mask)
                outputs.append(output)
            
        concatenated_outputs = Concatenate()(outputs)
        output = self._output(concatenated_outputs)

        if self._return_attention:
            attention = Concatenate()(attentions)
            return [output, attention]
        else:
            return output

# https://wanasit.github.io/attention-based-sequence-to-sequence-in-keras.html
# https://arxiv.org/pdf/1508.04025.pdf 
Example 29
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def build(self, input_shape):
        super(SequenceAttention, self).build(input_shape)
        self._validate_input_shape(input_shape)
        
        self._weights = {}
        if self._similarity == self._additive_similarity:
            self._weights["w_a"] = self.add_weight(
                name='w_a', 
                shape=(input_shape[0][-1] + input_shape[1][-1], input_shape[0][-1]),
                initializer=self._kernel_initializer,
                trainable=True
            )
            
            self._weights["v_a"] = self.add_weight(
                name='v_a', 
                shape=(1, input_shape[0][-1]),
                initializer=self._kernel_initializer,
                trainable=True
            )
            
        elif self._similarity == self._multiplicative_similarity:
            self._weights["w_a"] = self.add_weight(
                name='w_a', 
                shape=(input_shape[1][-1], input_shape[0][-1]),
                initializer=self._kernel_initializer,
                trainable=True
            )

        self.built = True 
Example 30
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    License: MIT License
def _validate_input_shape(self, input_shape):
        if len(input_shape) != 2:
            raise ValueError("Layer received an input shape {0} but expected two inputs (source, query).".format(input_shape))
        else:
            if input_shape[0][0] != input_shape[1][0]:
                raise ValueError("Both inputs (source, query) have to have the same batch size; received batch sizes: {0}, {1}".format(input_shape[0][0], input_shape[1][0]))
            if input_shape[0][1] != input_shape[1][1]:
                raise ValueError("Both inputs (source, query) have to have the same length; received lengths: {0}, {1}".format(input_shape[0][1], input_shape[1][1]))