Python keras.backend.concatenate() Examples

The following code examples show how to use keras.backend.concatenate(). They are drawn from open-source Python projects.
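
As a quick orientation before the examples, here is a minimal sketch of what the function does, assuming a standard Keras installation (any backend):

from keras import backend as K

# Two constant tensors of shape (2, 2).
a = K.constant([[1, 2], [3, 4]])
b = K.constant([[5, 6], [7, 8]])

# Join along the last axis (the default), giving shape (2, 4).
joined = K.concatenate([a, b], axis=-1)
print(K.int_shape(joined))  # (2, 4)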

Example 1
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start
    (resp. the end boundary energy b_end) to the start (resp. end) elements
    and multiplies in the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x 
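
A minimal usage sketch for the function above; the shapes, energies, and mask values are illustrative assumptions, not taken from the original project:

from keras import backend as K
import numpy as np

# Batch of 2 sequences, 4 timesteps, 3 tags.
x = K.constant(np.zeros((2, 4, 3)))
b_start = K.constant(np.ones(3))  # energy added at each sequence start
b_end = K.constant(np.ones(3))    # energy added at each sequence end
mask = K.constant([[1, 1, 1, 0],  # variable-length sequences
                   [1, 1, 0, 0]])

x_with_boundaries = add_boundary_energy(x, b_start=b_start, b_end=b_end, mask=mask)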
Example 2
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: model_Mobilenet.py    MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
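
A hedged usage sketch for yolo_correct_boxes; the 416x416 input and 720x1280 image are illustrative assumptions following the usual YOLOv3 letterboxing convention:

from keras import backend as K
import numpy as np

# One predicted box per image, in normalized (x, y) / (w, h) form.
box_xy = K.constant(np.array([[[0.5, 0.5]]]))   # box centre
box_wh = K.constant(np.array([[[0.2, 0.4]]]))   # box width and height
input_shape = K.constant([416., 416.])          # network input (height, width)
image_shape = K.constant([720., 1280.])         # original image (height, width)

boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
# boxes holds (y_min, x_min, y_max, x_max) in original-image pixels.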
Example 3
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License
def step(self, x, states):   
        h = states[0]
        # states[1] necessary?

        # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
        total_x_prod = states[-1]
        # comes from the constants (equals the input sequence)
        X = states[-2]
        
        # expand dims to add the vector which is only valid for this time step
        # to total_x_prod which is valid for all time steps
        hw = K.expand_dims(K.dot(h, self._W2), 1)
        additive_atn = total_x_prod + hw
        attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
        x_weighted = K.sum(attention * X, [1])

        x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
        
        h, new_states = self.layer.cell.call(x, states[:-2])
        
        return h, new_states 
Example 4
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License
def step(self, x, states):  
        h = states[0]
        # states[1] necessary?
        
        # comes from the constants
        X_static = states[-2]
        # equals K.dot(static_x, self._W1) + self._b2 with X.shape=[bs, L, static_input_dim]
        total_x_static_prod = states[-1]

        # expand dims to add the vector which is only valid for this time step
        # to total_x_prod which is valid for all time steps
        hw = K.expand_dims(K.dot(h, self._W2), 1)
        additive_atn = total_x_static_prod + hw
        attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
        static_x_weighted = K.sum(attention * X_static, [1])
        
        x = K.dot(K.concatenate([x, static_x_weighted], 1), self._W3) + self._b3

        h, new_states = self.layer.cell.call(x, states[:-2])
        
        # append attention to the states to "smuggle" it out of the RNN wrapper
        attention = K.squeeze(attention, -1)
        h = K.concatenate([h, attention])

        return h, new_states 
Example 5
Project: keras-yolo3   Author: bing0037   File: model.py    MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 6
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 7
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values 
Example 8
Project: solder_joint_detection   Author: lx-onism   File: model.py    MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 9
Project: vision-web-service   Author: sherlockchou86   File: model.py    MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 10
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    MIT License
def call(self , x, mask=None):
        
        e1 = x[0].T
        e2 = x[1].T

        batch_size = K.shape(x[0])[0]
        sim = []
        V_out = K.dot(self.V, K.concatenate([e1, e2], axis=0))

        for i in range(self.k):
            temp = K.batch_dot(K.dot(e1.T, self.W[i, :, :]), e2.T, axes=1)
            sim.append(temp)
        sim = K.reshape(sim, (self.k, batch_size))

        tensor_bi_product = self.activation(V_out + sim)
        tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T

        return tensor_bi_product 
Example 11
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License
def call(self, inputs):
        features = inputs[0]
        fltr_list = inputs[1:]

        # Convolution
        supports = list()
        for fltr in fltr_list:
            s = filter_dot(fltr, features)
            supports.append(s)
        supports = K.concatenate(supports, axis=-1)
        output = K.dot(supports, self.kernel)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output 
Example 12
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License
def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        if not K.is_sparse(fltr):
            fltr = tf.contrib.layers.dense_to_sparse(fltr)

        features_neigh = self.aggregate_op(
            tf.gather(features, fltr.indices[:, -1]), fltr.indices[:, -2]
        )
        output = K.concatenate([features, features_neigh])
        output = K.dot(output, self.kernel)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        output = K.l2_normalize(output, axis=-1)
        return output 
Example 13
Project: keras_extension   Author: k1414st   File: mac.py    MIT License
def call(self, inputs, training=None):

        c, r, m, msa = inputs[0], inputs[1], inputs[2], inputs[3]

        mi = K.bias_add(K.dot(K.concatenate([r, m], axis=-1),
                              self.mi_kernel),
                        self.mi_bias)

        md = K.bias_add(K.dot(msa, self.mdsa_kernel) +
                        K.dot(mi, self.mdi_kernel),
                        self.md_bias)
        cd = K.bias_add(K.dot(c, self.cd_kernel),
                        self.cd_bias)
        mi = self.forget_activation(cd) * m + \
            self.forget_activation(1-cd) * md
        return mi 
Example 14
Project: keras_extension   Author: k1414st   File: mac.py    MIT License
def call(self, inputs, training=None):

        c_cur, m_prev, knowledge = inputs[0], inputs[1], inputs[2]

        Im = K.expand_dims(K.bias_add(K.dot(m_prev, self.im_kernel),
                                      self.im_bias), axis=1)
        Ik = K.bias_add(K.dot(knowledge, self.ik_kernel),
                        self.ik_bias)
        I = Im * Ik
        Id = K.bias_add(K.dot(K.concatenate([I, knowledge], axis=-1),
                              self.id_kernel),
                        self.id_bias)
        cI = K.expand_dims(c_cur, axis=1) * Id
        ra = K.bias_add(K.dot(cI, self.ra_kernel),
                        self.ra_bias)
        rv = self.attention_activation(ra, axis=1)
        r = K.sum(rv * knowledge, axis=1)
        return r 
Example 15
Project: keras_extension   Author: k1414st   File: mac.py    MIT License
def call(self, inputs, training=None):

        c, r, m, msa = inputs[0], inputs[1], inputs[2], inputs[3]

        mi = K.bias_add(K.dot(K.concatenate([r, m], axis=-1),
                              self.mi_kernel),
                        self.mi_bias)

        md = K.bias_add(K.dot(msa, self.mdsa_kernel) +
                        K.dot(mi, self.mdi_kernel),
                        self.md_bias)
        cd = K.bias_add(K.dot(c, self.cd_kernel),
                        self.cd_bias)
        mi = self.forget_activation(cd) * m + \
            self.forget_activation(1-cd) * md
        return mi 
Example 16
Project: Keras-DropBlock   Author: MLearing   File: drop_block.py    MIT License
def _compute_valid_seed_region(self, height, width):
        positions = K.concatenate([
            K.expand_dims(K.tile(K.expand_dims(K.arange(height), axis=1), [1, width]), axis=-1),
            K.expand_dims(K.tile(K.expand_dims(K.arange(width), axis=0), [height, 1]), axis=-1),
        ], axis=-1)
        half_block_size = self.block_size // 2
        valid_seed_region = K.switch(
            K.all(
                K.stack(
                    [
                        positions[:, :, 0] >= half_block_size,
                        positions[:, :, 1] >= half_block_size,
                        positions[:, :, 0] < height - half_block_size,
                        positions[:, :, 1] < width - half_block_size,
                    ],
                    axis=-1,
                ),
                axis=-1,
            ),
            K.ones((height, width)),
            K.zeros((height, width)),
        )
        return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1) 
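
To make the mask above concrete, a small NumPy sketch computing the same valid-seed region for an assumed 5x5 feature map with block_size=3; ones mark positions where a full block fits inside the map:

import numpy as np

height, width, block_size = 5, 5, 3
half = block_size // 2
ys, xs = np.mgrid[0:height, 0:width]
valid = ((ys >= half) & (xs >= half) &
         (ys < height - half) & (xs < width - half)).astype(np.float32)
print(valid)
# [[0. 0. 0. 0. 0.]
#  [0. 1. 1. 1. 0.]
#  [0. 1. 1. 1. 0.]
#  [0. 1. 1. 1. 0.]
#  [0. 0. 0. 0. 0.]]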
Example 17
Project: cdt-ccm-aae   Author: danielegrattarola   File: layers.py    MIT License
def call(self, inputs):
        zero = K.constant(0.)

        # Spherical clip
        spherical_clip = self.radius * K.l2_normalize(inputs, -1)
        # Hyperbolic clip
        free_components = inputs[..., :-1]
        bound_component = K.sqrt(K.sum(free_components ** 2, -1)[..., None] + (self.radius ** 2))
        hyperbolic_clip = K.concatenate((free_components, bound_component), -1)

        lt_cond = K.less(self.radius, zero)
        lt_check = K.switch(lt_cond, hyperbolic_clip, inputs)

        gt_cond = K.greater(self.radius, zero)
        output = K.switch(gt_cond, spherical_clip, lt_check)

        return output 
Example 18
Project: cdt-ccm-aae   Author: danielegrattarola   File: model.py    MIT License
def geom_reg(r, sigma, l=0.05):
        # To use, set activity_regularizer=self.geom_reg(r_, sigma_) when
        # instantiating z_2 in _model_builder functions
        def geom_regularizer(x):
            sign = np.sign(r)
            free_components = x[..., :-1] ** 2
            bound_component = sign * x[..., -1:] ** 2
            all_components = K.concatenate((free_components, bound_component),
                                           -1)
            ext_product = K.sum(all_components, -1)[..., None]
            output_pre = K.exp(-(ext_product - sign * r ** 2) ** 2 / (2 * sigma ** 2))
            if sign == 0.:
                output_pre = K.zeros_like(output_pre)
            return l * K.sum(1. - output_pre)

        return geom_regularizer 
Example 19
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x) 
Example 20
Project: BERT   Author: yyht   File: drmm_utils.py    Apache License 2.0
def _multi_kmax_context_concat(inputs, top_k, poses):
	x, context_input = inputs
	idxes, topk_vs = list(), list()
	for p in poses:
		val, idx = tf.nn.top_k(tf.slice(x, [0,0,0], [-1,-1, p]), k=top_k, sorted=True, name=None)
		topk_vs.append(val)
		idxes.append(idx)
	concat_topk_max = tf.concat(topk_vs, -1, name='concat_val')
	concat_topk_idx = tf.concat(idxes, -1, name='concat_idx')
	# hack that requires the context to have the same shape as similarity matrices
	# https://stackoverflow.com/questions/41897212/how-to-sort-a-multi-dimensional-tensor-using-the-returned-indices-of-tf-nn-top-k
	shape = tf.shape(x)
	mg = tf.meshgrid(*[tf.range(d) for d in (tf.unstack(shape[:(x.get_shape().ndims - 1)]) + [top_k*len(poses)])], indexing='ij')
	val_contexts = tf.gather_nd(context_input, tf.stack(mg[:-1] + [concat_topk_idx], axis=-1))
	return tf.concat([concat_topk_max, val_contexts], axis=-1)
	# return backend.concatenate([concat_topk_max, val_contexts]) 
Example 21
Project: VisualNN   Author: angelhunt   File: lrn.py    GNU General Public License v3.0
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)
        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k
        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta
        return X / scale 
Example 22
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: model_Mobilenet.py    MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 23
Project: mnn-H2   Author: ywfan   File: testH2matrix2d.py    MIT License
def padding2d(x, size_x, size_y):
    wx = size_x // 2
    wy = size_y // 2
    nx = x.shape[1]
    ny = x.shape[2]
    # x direction
    y = K.concatenate([x[:,nx-wx:nx,:,:], x, x[:,0:wx,:,:]], axis=1)
    # y direction
    z = K.concatenate([y[:,:, ny-wy:ny,:], y, y[:,:,0:wy,:]], axis=2)
    return z 
Example 24
Project: mnn-H2   Author: ywfan   File: testH2matrix.py    MIT License
def padding(x, size):
    return K.concatenate([x[:,x.shape[1]-size//2:x.shape[1],:],
                          x, x[:,0:(size-size//2-1),:]], axis=1)

# === calculate the relative error of the training/test data sets 
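
A usage sketch for the periodic padding above; the tensor and size are illustrative assumptions:

from keras import backend as K
import numpy as np

x = K.constant(np.arange(8., dtype=np.float32).reshape(1, 4, 2))
y = padding(x, 3)       # wraps one element around each end of axis 1
print(K.int_shape(y))   # (1, 6, 2): length 4 -> 4 + (3 - 1)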
Example 25
Project: CapsAttnNet   Author: rstager   File: canlayer.py    MIT License
def _part_to_whole_predictions(self, x):
        """
        Estimate the pose of the whole given the pose of the part.
        :param x: set of poses to transform
        """
        # inputs.shape=[ input_num_capsule, input_num_instance, input_dim_capsule]
        # output.shape=[num_instance*num_capsule, num_parts*input_num_capsule*input_num_instance,dim_capsule]
        # xt.shape = [ input_num_capsule, num_instance, input_num_instance, input_dim_capsule]
        # xpart.shape = [ num_instance, input_num_instance, num_capsule, num_part, dim_x,input_num_capsule]
        # gpose.shape = [ input_num_capsule, num_instance, input_num_instance, dim_geom+1]
        xt = K.tile(K.expand_dims(x, 1), [1, self.num_instance, 1, 1])

        tmp = K.reshape(xt[:, :, :, :1], [self.input_num_capsule, self.num_instance, self.input_num_instance, 1, 1, 1])
        tmp = K.tile(tmp, [1, 1, 1, self.num_capsule, self.num_part, 1])
        ppart = K.permute_dimensions(tmp, [1, 2, 3, 4, 5, 0])

        gpose = K.concatenate([xt[:, :, :, 1:dim_geom+1], K.ones_like(xt[:, :, :, :1])])  # add a ones column to allow x-y translation
        gpart = K.concatenate([K.expand_dims(K.dot(gpose[i], self.W1[i]), -1) for i in range(self.input_num_capsule)])
        apart = K.concatenate([K.expand_dims(K.dot(xt[i, :, :, dim_geom+1:], self.W2[i]), -1) for i in range(self.input_num_capsule)])
        whole = K.concatenate([ppart, gpart, apart], 4)
        output = K.permute_dimensions(whole, [0, 2, 3, 5, 1, 4])
        output = K.reshape(output, [self.num_instance*self.num_capsule,
                                    self.num_part*self.input_num_capsule*self.input_num_instance, self.dim_capsule])
        # output = tf.Print(output, [tf.shape(x)], message='x', summarize=16)
        # output = tf.Print(output, [x[0,18,1:3]], message='x ', summarize=3)
        # output = tf.Print(output, [gpose[0,0,0,:]], message='x gpose ', summarize=5)
        # output = tf.Print(output, [gpose[0,1,0,:]], message='x gpose ', summarize=5)
        # output = tf.Print(output, [gpart[0,0,0,0,0,:]], message='x gpart ', summarize=5)
        # output = tf.Print(output, [gpart[0,1,0,0,0,:]], message='x gpart ', summarize=5)
        return output 
Example 26
Project: CapsAttnNet   Author: rstager   File: canlayer.py    MIT License
def _best_guess(self, c, inputs_hat):
        '''
        Combine the predicted poses 'inputs_hat', weighted by c, to come up with a best guess of the capsule poses

        :param c: weights to apply to the input poses
        :param inputs_hat: input poses
        :return: best guess at pose
        '''
        # c.shape=[None, num_capsule * num_instance, num_part * input_num_capsule * input_num_instance]
        # inputs_hat.shape = [None,num_instance * num_capsule, num_parts, input_num_capsule * input_num_instance, dim_capsule]
        # guess.shape = [None,num_instance * num_capsule,dim_capsule]

        # take the mean probability
        probability = tf.reduce_mean(inputs_hat[:,:,:,0:1],axis=2)

        # find the mean weighted geometric pose
        sum_weighted_geoms = K.batch_dot(c,inputs_hat[:,:,:,1:dim_geom+1], [2, 2])
        one_over_weight_sums = tf.tile(tf.expand_dims(tf.reciprocal(K.sum(c,axis=-1)),-1),[1,1,dim_geom])
        mean_geom = one_over_weight_sums * sum_weighted_geoms

        # squash the weighted sum of attributes
        weighted_attrs = K.batch_dot(c,inputs_hat[:,:,:,dim_geom+1:], [2, 2])
        scale = squash_scale(weighted_attrs)

        # use the magnitude of the squashed weighted sum of attributes for probability
        probability = scale

        guess = layers.concatenate([probability,mean_geom,weighted_attrs])
        return guess 
Example 27
Project: deep-models   Author: LaurentMazare   File: rhn.py    Apache License 2.0
def preprocess_input(self, x):
    if self.consume_less == 'cpu':
      input_shape = self.input_spec[0].shape
      input_dim = input_shape[2]
      timesteps = input_shape[1]

      x_t = time_distributed_dense(x, self.W_t, self.b_t, self.dropout_W,
                                   input_dim, self.output_dim, timesteps)
      x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
                                   input_dim, self.output_dim, timesteps)
      return K.concatenate([x_t, x_h], axis=2)
    else:
      return x 
Example 28
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License
def _additive_similarity(self, source, query):
        concatenation = K.concatenate([source, query], axis=2)
        nonlinearity = K.tanh(K.dot(concatenation, self._weights["w_a"]))
        
        # tile the weight vector (1, 1, dim) for each time step and each element of the batch -> (bs, T, dim)
        source_shape = K.shape(source)
        vaeff = K.tile(K.expand_dims(self._weights["v_a"], 0), [source_shape[0], source_shape[1], 1])

        similarity = K.batch_dot(K.permute_dimensions(vaeff, [0, 2, 1]), nonlinearity, axes=[1, 2])
        
        return similarity 
Example 29
Project: gandlf   Author: codekansas   File: losses.py    MIT License
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
    """Generative moment matching loss with RBF kernel.

    Reference: https://arxiv.org/abs/1502.02761
    """

    warnings.warn('Moment matching loss is still in development.')

    if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
        raise ValueError('RBF Moment Matching function currently only works '
                         'for outputs with shape (batch_size, num_features). '
                         'Got y_true="%s" and y_pred="%s".' %
                         (str(K.int_shape(y_pred)), str(K.int_shape(y_true))))

    sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]

    x = K.concatenate([y_pred, y_true], 0)

    # Performs dot product between all combinations of rows in X.
    xx = K.dot(x, K.transpose(x))  # (batch_size, batch_size)

    # Performs dot product of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, None)

    # Gets exponent entries of the RBF kernel (without sigmas).
    exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)

    # Applies all the sigmas.
    total_loss = None
    for sigma in sigmas:
        kernel_val = K.exp(exponent / sigma)
        loss = K.sum(kernel_val)
        total_loss = loss if total_loss is None else loss + total_loss

    return total_loss 
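
Since the function has the standard Keras loss signature (y_true, y_pred), it can be passed straight to model.compile. A hedged sketch; the model here is an illustrative assumption:

from keras.models import Sequential
from keras.layers import Dense

# A toy generator-style model with 2-D output (batch_size, num_features).
model = Sequential([Dense(2, input_shape=(10,))])
model.compile(optimizer='adam', loss=rbf_moment_matching)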
Example 30
Project: gandlf   Author: codekansas   File: core.py    MIT License
def call(self, x, mask=None):
        sims = []
        for n, sim in zip(self.n, self.similarities):
            for _ in range(n):
                batch_size = K.shape(x)[0]
                idx = K.random_uniform((batch_size,), low=0, high=batch_size,
                                       dtype='int32')
                x_shuffled = K.gather(x, idx)
                pair_sim = sim(x, x_shuffled)
                for _ in range(K.ndim(x) - 1):
                    pair_sim = K.expand_dims(pair_sim, dim=1)
                sims.append(pair_sim)

        return K.concatenate(sims, axis=-1) 
Example 31
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License
def ccm_uniform(size, dim=3, r=0., low=-1., high=1., projection='upper'):
    """
    Samples points from a uniform distribution on a constant-curvature manifold.
    If `r=0`, then points are sampled from a uniform distribution in the ambient
    space.
    If a list of radii is passed instead of a single scalar, then the sampling
    is repeated for each value in the list and the results are concatenated
    along the last axis (e.g., see [Grattarola et al. (2018)](https://arxiv.org/abs/1805.06299)).
    :param size: number of points to sample;
    :param dim: dimension of the ambient space;
    :param r: floats or list of floats, radii of the CCMs;
    :param low: lower bound of the uniform distribution from which to sample;
    :param high: upper bound of the uniform distribution from which to sample;
    :param projection: 'upper', 'lower', or 'both'. Whether to project points
    always on the upper or lower branch of the hyperboloid, or on both based
    on the sign of the last coordinate.
    :return: if `r` is a scalar, np.array of shape (size, dim). If `r` is a
    list, np.array of shape (size, len(r) * dim).
    """
    if isinstance(r, (int, float)):
        r = [r]
    elif not isinstance(r, (list, tuple)):
        raise TypeError('Radius must be either a single value, a list '
                        'of values (or a tuple).')
    to_ret = []
    for r_ in r:
        to_ret.append(_ccm_uniform(size, dim=dim, r=r_, low=low, high=high,
                                   projection=projection))
    return np.concatenate(to_ret, -1)


# Normal ####################################################################### 
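
A usage sketch for the sampler above, assuming the module's private helper _ccm_uniform is available; the arguments are illustrative:

# 100 points on a sphere of radius 1 and a hyperboloid of radius 1,
# concatenated along the last axis as the docstring describes.
points = ccm_uniform(100, dim=3, r=[1., -1.])
print(points.shape)  # (100, 6) == (size, len(r) * dim)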
Example 32
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License
def call(self, inputs):
        output_part = []
        manifold_size = K.int_shape(inputs)[-1] // len(self.r)

        for idx, r_ in enumerate(self.r):
            start = idx * manifold_size
            stop = start + manifold_size
            part = inputs[..., start:stop]
            sign = np.sign(r_)
            if sign == 0.:
                # This is weird but necessary to make the layer differentiable
                output_pre = K.sum(inputs, -1, keepdims=True) * 0. + 1.
            else:
                free_components = part[..., :-1] ** 2
                bound_component = sign * part[..., -1:] ** 2
                all_components = K.concatenate((free_components, bound_component), -1)
                ext_product = K.sum(all_components, -1, keepdims=True)
                output_pre = K.exp(-(ext_product - sign * r_ ** 2) ** 2 / (2 * self.sigma ** 2))

            output_part.append(output_pre)

        if len(output_part) >= 2:
            if self.mode == 'average':
                output = Average()(output_part)
            elif self.mode == 'concat':
                output = Concatenate()(output_part)
            else:
                raise ValueError()  # Never gets here
        else:
            output = output_part[0]

        return output 
Example 33
Project: ndsc_code_gakko_workshop   Author: seansaito   File: localize_image.py    MIT License
def yolo_head(feats, anchors, num_classes, input_shape):
    """
    Convert final layer features to bounding box parameters.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 34
Project: ndsc_code_gakko_workshop   Author: seansaito   File: localize_image.py    MIT License
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    """
    Get corrected boxes according to the image's original shape.
    In other words, we scale the predicted bounding boxes back to the original image's dimensions.
    """
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes 
Example 35
Project: keras-yolo3   Author: bing0037   File: model.py    MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 36
Project: keras-attention-augmented-convs   Author: titu1994   File: attn_augconv.py    MIT License
def rel_to_abs(self, x):
        shape = K.shape(x)
        shape = [shape[i] for i in range(3)]
        B, Nh, L = shape
        col_pad = K.zeros(K.stack([B, Nh, L, 1]))
        x = K.concatenate([x, col_pad], axis=3)
        flat_x = K.reshape(x, [B, Nh, L * 2 * L])
        flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
        flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
        final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
        final_x = final_x[:, :, :L, L - 1:]
        return final_x 
Example 37
Project: keras-attention-augmented-convs   Author: titu1994   File: attn_augconv.py    MIT License
def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8, relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for k.
            Computes the number of filters for `v`.
            If passed as float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for v.
            Computes the number of filters for `k`.
            If passed as float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)

    # Augmented Attention Block
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads, relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)

    output = concatenate([conv_out, attn_out], axis=channel_axis)
    return output 
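
A hedged usage sketch for the block above, assuming the helper layers from the same attn_augconv.py module are in scope; the input size and hyper-parameters are illustrative:

from keras.layers import Input
from keras.models import Model

ip = Input(shape=(32, 32, 3))
x = augmented_conv2d(ip, filters=64, kernel_size=(3, 3),
                     depth_k=0.25, depth_v=0.25, num_heads=4,
                     relative_encodings=True)
model = Model(ip, x)  # output shape: (None, 32, 32, 64)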
Example 38
Project: PiCamNN   Author: PiSimo   File: keras_yolo.py    MIT License
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
Example 39
Project: timeception   Author: noureldien   File: keras_utils.py    GNU General Public License v3.0
def map_charades(y_true, y_pred):
    """
    Returns mAP
    """
    m_aps = []

    tf_one = tf.constant(1, dtype=tf.float32)

    n_classes = y_pred.shape[1]
    for oc_i in range(n_classes):
        pred_row = y_pred[:, oc_i]
        sorted_idxs = tf_framework.argsort(-pred_row)
        true_row = y_true[:, oc_i]
        true_row = tf.map_fn(lambda i: true_row[i], sorted_idxs, dtype=np.float32)
        tp_boolean = tf.equal(true_row, tf_one)
        tp = tf.cast(tp_boolean, dtype=np.float32)
        fp = K.reverse(tp, axes=0)
        n_pos = tf.reduce_sum(tp)
        f_pcs = tf.cumsum(fp)
        t_pcs = tf.cumsum(tp)
        s = f_pcs + t_pcs

        s = tf.cast(s, tf.float32)
        t_pcs = tf.cast(t_pcs, tf.float32)
        tp_float = tf.cast(tp_boolean, np.float32)

        prec = t_pcs / s
        avg_prec = prec * tp_float

        n_pos = tf.cast(n_pos, tf.float32)
        avg_prec = avg_prec / n_pos
        avg_prec = tf.expand_dims(avg_prec, axis=0)
        m_aps.append(avg_prec)

    m_aps = K.concatenate(m_aps, axis=0)
    mAP = K.mean(m_aps)
    return mAP

# endregion

# region Callbacks 
Example 40
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 41
Project: AI2-Reasoning-Challenge-ARC   Author: SebiSebi   File: keras_custom_layers.py    GNU General Public License v3.0
def call(self, inputs):
        if not isinstance(inputs, list):
            raise ValueError('Similarity layer expects a list '
                             'of tensors as inputs.')
        if len(inputs) != 2:
            raise ValueError('Similarity layer expects two tensors as '
                             'input, {} were given.'.format(len(inputs)))

        x = inputs[0]
        y = inputs[1]

        # Each line in X should have the form: dataX,       1s,      dataX.
        # Each line in Y should have the form: 1s,       dataY,      dataY.
        #
        #                               =>     dataX,    dataY,  dataX * dataY
        #
        x = K.concatenate([x, K.ones(K.shape(x)), x], axis=-1)
        y = K.concatenate([K.ones(K.shape(y)), y, y], axis=-1)

        # Pair each lines and take elementwise product (without summation).
        # x = K.reshape(x, (-1, x.shape[1], 1, x.shape[2]))
        # y = K.reshape(y, (-1, 1, y.shape[1], y.shape[2]))
        x = K.expand_dims(x, axis=2)
        y = K.expand_dims(y, axis=1)
        rez = x * y

        # Apply dot product with a vector.
        rez = rez * self.WS

        # return K.ones((1, 93)) * self.WS
        return K.sum(rez, axis=-1) 
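
The concatenation trick in the comments can be checked with a small NumPy example: padding x with ones in the middle and y with ones at the front makes the elementwise product of a row pair exactly [dataX, dataY, dataX * dataY]:

import numpy as np

data_x = np.array([2., 3.])
data_y = np.array([5., 7.])
row_x = np.concatenate([data_x, np.ones(2), data_x])  # [2. 3. 1. 1. 2. 3.]
row_y = np.concatenate([np.ones(2), data_y, data_y])  # [1. 1. 5. 7. 5. 7.]
print(row_x * row_y)  # [ 2.  3.  5.  7. 10. 21.]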
Example 42
Project: AI2-Reasoning-Challenge-ARC   Author: SebiSebi   File: keras_custom_layers.py    GNU General Public License v3.0
def call(self, inputs):
        if not isinstance(inputs, list):
            raise ValueError('Similarity layer expects a list '
                             'of tensors as inputs.')
        if len(inputs) != 2:
            raise ValueError('Similarity layer expects two tensors as '
                             'input, {} were given.'.format(len(inputs)))

        x = inputs[0]
        y = inputs[1]

        # Each line in X should have the form: dataX,       1s,      dataX.
        # Each line in Y should have the form: 1s,       dataY,      dataY.
        #
        #                               =>     dataX,    dataY,  dataX * dataY
        #
        x = K.concatenate([x, K.ones(K.shape(x)), x], axis=-1)
        y = K.concatenate([K.ones(K.shape(y)), y, y], axis=-1)

        # Pair each lines and take elementwise product (without summation).
        # x = K.reshape(x, (-1, x.shape[1], 1, x.shape[2]))
        # y = K.reshape(y, (-1, 1, y.shape[1], y.shape[2]))
        x = K.expand_dims(x, axis=2)
        y = K.expand_dims(y, axis=1)
        rez = x * y

        # Apply dot product with a vector.
        rez = rez * self.WS

        # return K.ones((1, 93)) * self.WS
        return K.sum(rez, axis=-1) 
Example 43
Project: AI2-Reasoning-Challenge-ARC   Author: SebiSebi   File: keras_custom_layers.py    GNU General Public License v3.0
def call(self, inputs):
        if not isinstance(inputs, list):
            raise ValueError('Similarity layer expects a list '
                             'of tensors as inputs.')
        if len(inputs) != 2:
            raise ValueError('Similarity layer expects two tensors as '
                             'input, {} were given.'.format(len(inputs)))

        x = inputs[0]
        y = inputs[1]

        # Each line in X should have the form: dataX,       1s,      dataX.
        # Each line in Y should have the form: 1s,       dataY,      dataY.
        #
        #                               =>     dataX,    dataY,  dataX * dataY
        #
        x = K.concatenate([x, K.ones(K.shape(x)), x], axis=-1)
        y = K.concatenate([K.ones(K.shape(y)), y, y], axis=-1)

        # Pair each lines and take elementwise product (without summation).
        # x = K.reshape(x, (-1, x.shape[1], 1, x.shape[2]))
        # y = K.reshape(y, (-1, 1, y.shape[1], y.shape[2]))
        x = K.expand_dims(x, axis=2)
        y = K.expand_dims(y, axis=1)
        rez = x * y

        # Apply dot product with a vector.
        rez = rez * self.WS

        # return K.ones((1, 93)) * self.WS
        return K.sum(rez, axis=-1) 
Example 44
Project: AI2-Reasoning-Challenge-ARC   Author: SebiSebi   File: keras_custom_layers.py    GNU General Public License v3.0
def call(self, inputs):
        if not isinstance(inputs, list):
            raise ValueError('Similarity layer expects a list '
                             'of tensors as inputs.')
        if len(inputs) != 2:
            raise ValueError('Similarity layer expects two tensors as '
                             'input, {} were given.'.format(len(inputs)))

        x = inputs[0]
        y = inputs[1]

        # Each line in X should have the form: dataX,       1s,      dataX.
        # Each line in Y should have the form: 1s,       dataY,      dataY.
        #
        #                               =>     dataX,    dataY,  dataX * dataY
        #
        x = K.concatenate([x, K.ones(K.shape(x)), x], axis=-1)
        y = K.concatenate([K.ones(K.shape(y)), y, y], axis=-1)

        # Pair each lines and take elementwise product (without summation).
        # x = K.reshape(x, (-1, x.shape[1], 1, x.shape[2]))
        # y = K.reshape(y, (-1, 1, y.shape[1], y.shape[2]))
        x = K.expand_dims(x, axis=2)
        y = K.expand_dims(y, axis=1)
        rez = x * y

        # Apply dot product with a vector.
        rez = rez * self.WS

        # return K.ones((1, 93)) * self.WS
        return K.sum(rez, axis=-1) 
Example 45
Project: solder_joint_detection   Author: lx-onism   File: model.py    MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    #print(K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 46
Project: vision-web-service   Author: sherlockchou86   File: model.py    MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 47
Project: Un-Fake   Author: Somil112   File: app.py    MIT License
def build(self, input_shape, name='embeddings'):        
        fixed_weight = K.variable(self.fixed_weights, name=name+'_fixed')
        variable_weight = K.variable(self.variable_weights, name=name+'_var')
        
        self._trainable_weights.append(variable_weight)
        self._non_trainable_weights.append(fixed_weight)
        
        self.embeddings = K.concatenate([fixed_weight, variable_weight], axis=0)
        
        self.built = True 
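
A minimal sketch of the idea behind this build(): pretrained embedding rows are kept fixed while rows for new vocabulary stay trainable, and the full matrix is their concatenation. The arrays here are illustrative assumptions:

from keras import backend as K
import numpy as np

pretrained = np.random.rand(100, 50)  # e.g. pretrained rows, kept frozen
new_vocab = np.random.rand(10, 50)    # rows for new words, trainable

fixed_weight = K.variable(pretrained)     # registered as non-trainable
variable_weight = K.variable(new_vocab)   # registered as trainable
embeddings = K.concatenate([fixed_weight, variable_weight], axis=0)
print(K.int_shape(embeddings))  # (110, 50)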
Example 48
Project: Un-Fake   Author: Somil112   File: views.py    MIT License
def build(self, input_shape, name='embeddings'):        
        fixed_weight = K.variable(self.fixed_weights, name=name+'_fixed')
        variable_weight = K.variable(self.variable_weights, name=name+'_var')
        
        self._trainable_weights.append(variable_weight)
        self._non_trainable_weights.append(fixed_weight)
        
        self.embeddings = K.concatenate([fixed_weight, variable_weight], axis=0)
        
        self.built = True 
Example 49
Project: keras_nade   Author: jgrnt   File: orderless_nade.py    BSD 3-Clause "New" or "Revised" License
def call(self, x, **kwargs):
        if K.backend() == "theano":
            import theano
            from theano import tensor as T
            from theano.tensor.shared_randomstreams import RandomStreams
            mask_rng = RandomStreams(self.seed)

            ints = mask_rng.random_integers(size=K.expand_dims(x.shape[0], 0), high=x.shape[1] - 1)

            def set_value_at_position(i, ns_x):
                zeros = T.zeros_like(ns_x[0, :])
                return T.set_subtensor(zeros[:i], 1)

            result, updates = theano.scan(fn=set_value_at_position,
                                          outputs_info=None,
                                          sequences=ints,
                                          non_sequences=x)
            mask = mask_rng.shuffle_row_elements(result)
        elif K.backend() == "tensorflow":
            import tensorflow as tf
            tf.set_random_seed(self.seed)
            ints = tf.random_uniform(shape=K.expand_dims(tf.shape(x)[0], 0),
                                     maxval=x.shape[1],
                                     dtype=tf.int32)
            result = tf.sequence_mask(ints, maxlen=x.shape[1])
            parallel_iterations = self._deterministic and 1 or 10
            mask = tf.cast(tf.map_fn(tf.random_shuffle, result, parallel_iterations=parallel_iterations), K.floatx())
        else:
            raise NotImplementedError()
        return K.concatenate([x * mask, mask]) 
Example 50
Project: keras_nade   Author: jgrnt   File: orderless_nade.py    BSD 3-Clause "New" or "Revised" License
def logdensity_model(inner_model, num_of_orderings=1):
    input_size = inner_model.input_shape[1][1]

    # This returns a tensor
    inputs = Input(shape=(input_size,))
    batch_size = K.shape(inputs)[0]

    # Collect all outputs per batch here
    outs = []

    for o in range(num_of_orderings):
        mask = np.zeros((1, input_size))
        ordering = np.random.permutation(input_size)
        for i in ordering:
            bn_mask = K.repeat(K.constant(mask), batch_size)[0]
            masked_input = Lambda(lambda x: K.concatenate([x * bn_mask, bn_mask]), output_shape=(input_size * 2,))(inputs)
            inner_result = inner_model([masked_input,
                                        inputs,
                                        Lambda(lambda x: bn_mask, name="mask_{}_{}".format(o, i))(inputs)])
            result = Lambda(lambda x: x[:, i], output_shape=(1,))(inner_result)
            outs.append(result)
            mask[0, i] = 1

    # Sum up output
    if len(outs) == 1:
        intermediate = outs[0]
    else:
        intermediate = Concatenate(axis=0)(outs)
    outputs = Lambda(lambda x: K.logsumexp(x + K.log(1.0 / num_of_orderings)), output_shape=(1,))(intermediate)

    return Model(inputs=inputs, outputs=outputs) 
Example 51
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0
def step(self, inputs, states):
        h = states[0]
        d = states[1]
        n = states[2]
        a_max = states[3]
#         dp_mask = states[2]
#         rec_dp_mask = states[3]
        inputs_joined = K.concatenate([inputs, h], axis=-1)
        u = K.dot(inputs,self.features_kernel)
        u = K.bias_add(u, self.features_bias)
        
        g = K.dot(inputs_joined, self.recurrent_kernel)
        g = K.bias_add(g, self.recurrent_bias)
        
        a = K.dot(inputs_joined, self.average_kernel)
        
        z = u * self.recurrent_activation(g)
        
        a_newmax = K.maximum(a_max, a)
        exp_diff = K.exp(a_max - a_newmax)
        exp_scaled = K.exp(a - a_newmax)
        
        n = n*exp_diff + z*exp_scaled
        d = d*exp_diff + exp_scaled
        h_new = self.activation(n/d)
        a_max = a_newmax
        h = h_new

        return h, [h, d, n, a_max] 
Example 52
Project: cnn-rnf   Author: bloomberg   File: cnn_keras.py    Apache License 2.0
def call(self, x):
        chunks = []
        for i in range(self.sent_len - self.filter_width + 1):
            chunk = x[:, i:i+self.filter_width, :]
            chunk = K.expand_dims(chunk, 1)
            chunks.append(chunk)
        return K.concatenate(chunks, 1) 
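
A hedged shape check of the same sliding-window construction outside the layer; the tensor sizes are illustrative assumptions:

from keras import backend as K
import numpy as np

sent_len, filter_width = 5, 3
x = K.constant(np.zeros((2, sent_len, 8)))  # (batch, timesteps, dim)

chunks = [K.expand_dims(x[:, i:i + filter_width, :], 1)
          for i in range(sent_len - filter_width + 1)]
windows = K.concatenate(chunks, 1)
print(K.int_shape(windows))  # (2, 3, 3, 8): 5 - 3 + 1 = 3 windows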
Example 53
Project: keras_extension   Author: k1414st   File: graph.py    MIT License
def call(self, inputs):
        """
        Args:
            input[0]: input_layer(N_Batch, L_sequence, Dim_feature)
            input[1]: weighted-digraph(L, L) = (from, to)
        Return:
            output_layer(N_Batch, L_sequence, Dim_feature)
        """
        input_data = inputs[0]
        graph = inputs[1]

        xs = [None] * self.n_layers
        ss = [None] * self.n_layers

        xs[0], ss[0] = self.grnn_layer.call([input_data, graph])
        for i in range(1, self.n_layers):
            xs[i], ss[i] = \
                self.grnn_layer.call([xs[i-1], graph, ss[i-1]], encode=False)
        return xs[self.n_layers-1]

        # return values depend on options.
        if self.return_sequences:
            xs = [K.expand_dims(x, axis=self.output_sequence_axis) for x in xs]
            ss = [K.expand_dims(s, axis=self.output_sequence_axis) for s in ss]
            if self.return_state:
                return (K.concatenate(xs, axis=self.output_sequence_axis),
                        K.concatenate(xs, axis=self.output_sequence_axis))
            else:
                return K.concatenate(xs, axis=self.output_sequence_axis)
        else:
            if self.return_state:
                return xs[-1], ss[-1]
            else:
                return xs[-1] 
Example 54
Project: keras_extension   Author: k1414st   File: layer.py    MIT License
def call(self, inputs):
        """
        Args:
            input[0]: input_layer(N_Batch, L_sequence, Dim_feature)
            input[1]: weighted-digraph(L, L) = (from, to)
        Return:
            output_layer(N_Batch, L_sequence, Dim_feature)
        """
        input_data = inputs[0]
        graph = inputs[1]

        xs = [None] * self.n_layers
        ss = [None] * self.n_layers

        xs[0], ss[0] = self.grnn_layer.call([input_data, graph])
        for i in range(1, self.n_layers):
            xs[i], ss[i] = \
                self.grnn_layer.call([xs[i-1], graph, ss[i-1]], encode=False)

        # return values depend on options.
        if self.return_sequences:
            xs = [K.expand_dims(x, axis=self.output_sequence_axis) for x in xs]
            ss = [K.expand_dims(s, axis=self.output_sequence_axis) for s in ss]
            if self.return_state:
                return (K.concatenate(xs, axis=self.output_sequence_axis),
                        K.concatenate(ss, axis=self.output_sequence_axis))
            else:
                return K.concatenate(xs, axis=self.output_sequence_axis)
        else:
            if self.return_state:
                return xs[-1], ss[-1]
            else:
                return xs[-1] 
Example 55
Project: keras_extension   Author: k1414st   File: mac.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):

        c_prev, extractor, cw_s = inputs[0], inputs[1], inputs[2]

        q_i = K.bias_add(K.dot(extractor, self.q_kernel),
                         self.q_bias)
        cq_i = K.bias_add(K.dot(K.concatenate([c_prev, q_i], axis=-1),
                                self.cq_kernel),
                          self.cq_bias)
        cqcw = K.expand_dims(cq_i, axis=1) * cw_s
        ca_is = K.bias_add(K.dot(cqcw, self.ca_kernel),
                           self.ca_bias)
        cv_is = self.attention_activation(ca_is, axis=-1)
        c_i = K.sum(cv_is * cw_s, axis=1)
        return c_i 
Example 56
Project: keras_extension   Author: k1414st   File: mac.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):

        extractor, cw_s, knowledge = \
            inputs[0], inputs[1], inputs[2]

        c_0 = K.zeros_like(cw_s[:, 0, :])
        m_0 = K.zeros_like(cw_s[:, 0, :])

        # first
        c, m = self.mac_cell.call(
            [c_0, m_0, extractor, cw_s, knowledge, m_0])

        c_seq = K.expand_dims(c, axis=1)
        m_seq = K.expand_dims(m, axis=1)

        # second step onwards
        for i in range(1, self.recurrent_length):
            # self-attention
            cc = c_seq * K.expand_dims(c, axis=1)
            sv = K.bias_add(K.dot(cc, self.sv_kernel),
                            self.sv_bias)
            sa = self.attention_activation(sv, axis=1)
            m_sa = K.sum(sa * m_seq, axis=1)  # self-attentioned m_1~i

            # MAC cell (main flow)
            c, m = self.mac_cell.call(
                [c, m, extractor, cw_s, knowledge, m_sa])

            # make list of control & memory (for next self-attention)
            c_seq = K.concatenate([c_seq, K.expand_dims(c, axis=1)], axis=1)
            m_seq = K.concatenate([m_seq, K.expand_dims(m, axis=1)], axis=1)

        return [c, m] 
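Growing c_seq and m_seq one step at a time, as above, is the concatenate-append pattern: each new (batch, dim) tensor gets a length-1 time axis and is joined onto the running sequence. A minimal sketch with made-up sizes:

import numpy as np

c_seq = np.random.rand(2, 1, 6)                 # (batch, t=1, dim), like the first step
for _ in range(3):
    c = np.random.rand(2, 6)                    # output of one more recurrent step
    c_seq = np.concatenate([c_seq, c[:, None, :]], axis=1)
assert c_seq.shape == (2, 4, 6)                 # the time axis grew to t=4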
Example 57
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 5 votes vote down vote up
def call(self, inputs):
        """
        Args:
            input[0]: input_layer(N_Batch, L_sequence, Dim_feature)
            input[1]: weighted-digraph(L, L) = (from, to)
        Return:
            output_layer(N_Batch, L_sequence, Dim_feature)
        """
        input_data = inputs[0]
        graph = inputs[1]

        xs = [None] * self.n_layers
        ss = [None] * self.n_layers

        xs[0], ss[0] = self.grnn_layer.call([input_data, graph])
        for i in range(1, self.n_layers):
            xs[i], ss[i] = \
                self.grnn_layer.call([xs[i-1], graph, ss[i-1]], encode=False)

        # return values depend on options.
        if self.return_sequences:
            xs = [K.expand_dims(x, axis=self.output_sequence_axis) for x in xs]
            ss = [K.expand_dims(s, axis=self.output_sequence_axis) for s in ss]
            if self.return_state:
                return (K.concatenate(xs, axis=self.output_sequence_axis),
                        K.concatenate(ss, axis=self.output_sequence_axis))
            else:
                return K.concatenate(xs, axis=self.output_sequence_axis)
        else:
            if self.return_state:
                return xs[-1], ss[-1]
            else:
                return xs[-1] 
Example 58
Project: keras_extension   Author: k1414st   File: merge.py    MIT License 5 votes vote down vote up
def compute_mask(self, inputs, mask=None):
        if mask is None:
            return None
        if not isinstance(mask, list):
            raise ValueError('`mask` should be a list.')
        if not isinstance(inputs, list):
            raise ValueError('`inputs` should be a list.')
        if len(mask) != len(inputs):
            raise ValueError('The lists `inputs` and `mask` '
                             'should have the same length.')
        if all([m is None for m in mask]):
            return None
        masks = [K.expand_dims(m, 0) for m in mask if m is not None]
        return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False) 
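Here K.concatenate only serves to stack the masks so K.all can AND them elementwise across inputs. A small sketch with boolean NumPy stand-ins:

import numpy as np

m1 = np.array([True, True, False])              # per-timestep mask from input 1
m2 = np.array([True, False, False])             # per-timestep mask from input 2
stacked = np.concatenate([m[None] for m in (m1, m2)], axis=0)   # (n_inputs, timesteps)
combined = stacked.all(axis=0)                  # K.all(..., axis=0, keepdims=False)
assert (combined == [True, False, False]).all()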
Example 59
Project: keras_extension   Author: k1414st   File: mac.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):

        c_prev, extractor, cw_s = inputs[0], inputs[1], inputs[2]

        q_i = K.bias_add(K.dot(extractor, self.q_kernel),
                         self.q_bias)
        cq_i = K.bias_add(K.dot(K.concatenate([c_prev, q_i], axis=-1),
                                self.cq_kernel),
                          self.cq_bias)
        cqcw = K.expand_dims(cq_i, axis=1) * cw_s
        ca_is = K.bias_add(K.dot(cqcw, self.ca_kernel),
                           self.ca_bias)
        cv_is = self.attention_activation(ca_is, axis=-1)
        c_i = K.sum(cv_is * cw_s, axis=1)
        return c_i 
Example 60
Project: cdt-ccm-aae   Author: danielegrattarola   File: layers.py    MIT License 5 votes vote down vote up
def call(self, inputs):
        output_part = []
        manifold_size = K.int_shape(inputs)[-1] // len(self.r)

        for idx, r_ in enumerate(self.r):
            start = idx * manifold_size
            stop = start + manifold_size
            part = inputs[..., start:stop]
            sign = np.sign(r_)
            if sign == 0.:
                # This is weird but necessary to make the layer differentiable
                output_pre = K.sum(inputs, -1, keepdims=True) * 0. + 1.
            else:
                free_components = part[..., :-1] ** 2
                bound_component = sign * part[..., -1:] ** 2
                all_components = K.concatenate((free_components, bound_component), -1)
                ext_product = K.sum(all_components, -1, keepdims=True)
                output_pre = K.exp(-(ext_product - sign * r_ ** 2) ** 2 / (2 * self.sigma ** 2))

            output_part.append(output_pre)

        if len(output_part) >= 2:
            if self.mode == 'average':
                output = Average()(output_part)
            elif self.mode == 'concat':
                output = Concatenate()(output_part)
            else:
                raise ValueError()  # Never gets here
        else:
            output = output_part[0]

        return output 
Example 61
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes vote down vote up
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
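A quick numeric check of the corner conversion with made-up values: a box centred at (x, y) = (0.5, 0.5) with (w, h) = (0.2, 0.4) becomes [y_min, x_min, y_max, x_max]:

import numpy as np

box_xy = np.array([[0.5, 0.5]])                 # (x, y) centre
box_wh = np.array([[0.2, 0.4]])                 # (w, h)
box_mins, box_maxes = box_xy - box_wh / 2., box_xy + box_wh / 2.
corners = np.concatenate([box_mins[..., 1:2], box_mins[..., 0:1],
                          box_maxes[..., 1:2], box_maxes[..., 0:1]], axis=-1)
assert np.allclose(corners, [[0.3, 0.4, 0.7, 0.6]])   # y_min, x_min, y_max, x_max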
Example 62
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes vote down vote up
def yolo_eval(
        yolo_outputs, 
        anchors, 
        num_classes,
        image_shape=(720., 1280.), 
        max_boxes=10, 
        score_threshold=.6, 
        iou_threshold=.5):
    # Gather boxes and scores from the three output scales of the YOLO model
    for i in range(3):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[i], anchors[6-3*i:9-3*i], num_classes, i)
        if i == 0:
            boxes, box_scores = _boxes, _box_scores
        else:
            boxes = K.concatenate([boxes, _boxes], axis=0)
            box_scores = K.concatenate([box_scores, _box_scores], axis=0)

    # Score-filtering with a threshold of score_threshold
    scores, boxes, classes = yolo_filter_boxes(boxes, box_scores, score_threshold)

    # Scale boxes back to original image shape.
    boxes = scale_boxes(boxes, image_shape)

    # Non-max suppression with a threshold of iou_threshold
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)

    return scores, boxes, classes 
Example 63
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License 5 votes vote down vote up
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
Example 64
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: model_Mobilenet.py    MIT License 4 votes vote down vote up
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""

    num_layers = len(yolo_outputs)

    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32

    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_ 
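Both rounds of K.concatenate(..., axis=0) above simply flatten ragged per-scale (and later per-class) results into one tensor. Shapes only, with NumPy stand-ins and illustrative candidate counts for a 416x416 input (13*13*3, 26*26*3 and 52*52*3 boxes per scale):

import numpy as np

per_scale = [np.zeros((507, 4)), np.zeros((2028, 4)), np.zeros((8112, 4))]
boxes = np.concatenate(per_scale, axis=0)       # one flat list of candidate boxes
assert boxes.shape == (10647, 4)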
Example 65
Project: deep-models   Author: LaurentMazare   File: rhn.py    Apache License 2.0 4 votes vote down vote up
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    self.input_dim = input_shape[2]

    if self.stateful:
      self.reset_states()
    else:
      # initial states: all-zero tensor of shape (output_dim)
      self.states = [None]

    self.W_t = self.init((self.input_dim, self.output_dim),
                         name='{}_W_t'.format(self.name))
    self.b_t = K.zeros((self.output_dim,), name='{}_b_t'.format(self.name))
    self.W_h = self.init((self.input_dim, self.output_dim),
                         name='{}_W_h'.format(self.name))
    self.b_h = K.zeros((self.output_dim,), name='{}_b_h'.format(self.name))

    self.U_ts, self.b_ts = [], []
    self.U_hs, self.b_hs = [], []
    for l in range(self.L):
      self.U_ts.append(self.inner_init((self.output_dim, self.output_dim), name='{}_U_t{}'.format(self.name, l)))
      self.b_ts.append(K.zeros((self.output_dim,), name='{}_b_t{}'.format(self.name, l)))
      self.U_hs.append(self.inner_init((self.output_dim, self.output_dim), name='{}_U_h{}'.format(self.name, l)))
      self.b_hs.append(K.zeros((self.output_dim,), name='{}_b_h{}'.format(self.name, l)))

    self.trainable_weights = [ self.W_t, self.b_t, self.W_h, self.b_h] + self.U_ts + self.U_hs + self.b_ts + self.b_hs

    self.W = K.concatenate([self.W_t, self.W_h])
    self.U = K.concatenate(self.U_ts + self.U_hs)
    self.b = K.concatenate([self.b_t, self.b_h] + self.b_ts + self.b_hs)

    self.regularizers = []
    if self.W_regularizer:
      self.W_regularizer.set_param(self.W)
      self.regularizers.append(self.W_regularizer)
    if self.U_regularizer:
      self.U_regularizer.set_param(self.U)
      self.regularizers.append(self.U_regularizer)
    if self.b_regularizer:
      self.b_regularizer.set_param(self.b)
      self.regularizers.append(self.b_regularizer)

    if self.initial_weights is not None:
      self.set_weights(self.initial_weights)
      del self.initial_weights 
Example 66
Project: keras-minimal-rnn   Author: titu1994   File: minimal_rnn.py    MIT License 4 votes vote down vote up
def build(self, input_shape):
        self.timestep_dim = 1 #input_shape[0]
        self.input_dim = input_shape[1]

        self.kernel = self.add_weight(shape=(self.input_dim, self.units),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 2),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        if self.use_bias:
            if self.unit_forget_bias:
                def bias_initializer(shape, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 2,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)

        else:
            self.bias = None
            self.attention_bias = None
            self.attention_recurrent_bias = None

        self.recurrent_kernel_u = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_z = self.recurrent_kernel[:, self.units: self.units * 2]

        if self.use_bias:
            self.bias_z = self.bias[:self.units]
            self.bias_u = self.bias[self.units: self.units * 2]
        else:
            self.bias_z = None
            self.bias_u = None
        self.built = True 
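With the default zeros bias_initializer, the concatenated initializer above yields one (2 * units,) vector whose halves slice back into bias_z and bias_u, the same layout trick Keras LSTMs use for unit_forget_bias. A sketch of the resulting layout (units made up):

import numpy as np

units = 4
bias = np.concatenate([np.zeros(units), np.ones(units)])  # bias_initializer then Ones
bias_z, bias_u = bias[:units], bias[units:units * 2]
assert (bias_z == 0).all() and (bias_u == 1).all()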
Example 67
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License 4 votes vote down vote up
def ccm_normal(size, dim=3, r=0., tangent_point=None, loc=0., scale=1.):
    """
    Samples points from a Gaussian distribution on a constant-curvature manifold.
    If `r=0`, then points are sampled from a Gaussian distribution in the
    ambient space.
    If a list of radii is passed instead of a single scalar, then the sampling
    is repeated for each value in the list and the results are concatenated
    along the last axis (e.g., see [Grattarola et al. (2018)](https://arxiv.org/abs/1805.06299)).
    :param size: number of points to sample;
    :param dim: dimension of the ambient space;
    :param r: float or list of floats, radii of the CCMs;
    :param tangent_point: np.array, origin of the tangent plane on the CCM
    (extrinsic coordinates); if `None`, defaults to `[0., ..., 0., r]`;
    :param loc: mean of the Gaussian on the tangent plane;
    :param scale: standard deviation of the Gaussian on the tangent plane;
    :return: if `r` is a scalar, np.array of shape (size, dim). If `r` is a
    list, np.array of shape (size, len(r) * dim).
    """
    if isinstance(r, (int, float)):
        r = [r]
    elif not isinstance(r, (list, tuple)):
        raise TypeError('Radius must be either a single value, a list '
                        'of values, or a tuple.')

    if tangent_point is None:
        tangent_point = [None] * len(r)
    elif isinstance(tangent_point, np.ndarray):
        tangent_point = [tangent_point]
    elif isinstance(tangent_point, list) or isinstance(tangent_point, tuple):
        pass
    else:
        raise TypeError('tangent_point must be either a single point or a '
                        'list of points.')

    if len(r) != len(tangent_point):
        raise ValueError('r and tangent_point must have the same length')

    to_ret = []
    for r_, tp_ in zip(r, tangent_point):
        to_ret.append(_ccm_normal(size, dim=dim, r=r_, tangent_point=tp_,
                                  loc=loc, scale=scale))
    return np.concatenate(to_ret, -1)


# Generic ###################################################################### 
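Hedged usage sketch (assuming the project's geometry module, which also defines the private helper _ccm_normal, is importable under this name): a scalar radius yields (size, dim) samples, while a list of radii concatenates one such block per radius along the last axis.

from geometry import ccm_normal   # assumed import path within the ccm-aae project

pts = ccm_normal(size=128, dim=3, r=1.)                   # one CCM -> (128, 3)
pts_multi = ccm_normal(size=128, dim=3, r=[1., 0., -1.])  # three CCMs -> (128, 9)
assert pts.shape == (128, 3) and pts_multi.shape == (128, 9)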
Example 68
Project: ndsc_code_gakko_workshop   Author: seansaito   File: localize_image.py    MIT License 4 votes vote down vote up
def yolo_predict(yolo_outputs,
                 anchors,
                 num_classes,
                 image_shape,
                 max_boxes=20,
                 score_threshold=.6,
                 iou_threshold=.5):
    """
    Get prediction from YOLO model on given input and return filtered boxes.
    """
    num_layers = len(yolo_outputs)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]

    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        # Get the boxes and the confidences for each box
        box_xy, box_wh, box_confidence, box_class_probs = yolo_head(yolo_outputs[l],
                                                                    anchors[anchor_mask[l]],
                                                                    num_classes,
                                                                    input_shape)
        _boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
        _boxes = K.reshape(_boxes, [-1, 4])
        _box_scores = box_confidence * box_class_probs
        _box_scores = K.reshape(_box_scores, [-1, num_classes])

        boxes.append(_boxes)
        box_scores.append(_box_scores)

    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        # TensorFlow function for non max suppression of detection candidates
        # This is for filtering overlapping bounding boxes
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_ 
Example 69
Project: keras-yolo3   Author: bing0037   File: model.py    MIT License 4 votes vote down vote up
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_ 
Example 70
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0 4 votes vote down vote up
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(3):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_ 
Example 71
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0 4 votes vote down vote up
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(T, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    yolo_outputs = args[:3]
    y_true = args[3:]
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(3)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]

    for l in range(3):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        pred_xy, pred_wh, pred_confidence, pred_class_probs = yolo_head(yolo_outputs[l],
             anchors[anchor_mask[l]], num_classes, input_shape)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet box loss.
        xy_delta = (y_true[l][..., :2]-pred_xy)*grid_shapes[l][::-1]
        wh_delta = K.log(y_true[l][..., 2:4]) - K.log(pred_wh)
        # Avoid log(0)=-inf.
        wh_delta = K.switch(object_mask, wh_delta, K.zeros_like(wh_delta))
        box_delta = K.concatenate([xy_delta, wh_delta], axis=-1)
        box_delta_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')
        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        box_loss = object_mask * K.square(box_delta*box_delta_scale)
        confidence_loss = object_mask * K.square(1-pred_confidence) + \
            (1-object_mask) * K.square(0-pred_confidence) * ignore_mask
        class_loss = object_mask * K.square(true_class_probs-pred_class_probs)
        loss += K.sum(box_loss) + K.sum(confidence_loss) + K.sum(class_loss)
    return loss / K.cast(m, K.dtype(loss)) 
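The K.switch guard above matters because y_true stores w/h as zero wherever no object is present, and K.log(0) is -inf; zeroing those positions before squaring keeps the loss and its gradients finite. A scalar NumPy sketch of the same guard:

import numpy as np

object_mask = np.array([1., 0.])                # second cell contains no object
true_wh = np.array([0.3, 0.0])                  # w/h default to zero on empty cells
with np.errstate(divide='ignore'):
    wh_delta = np.log(true_wh) - np.log([0.25, 0.25])     # -inf appears at the empty cell
wh_delta = np.where(object_mask > 0, wh_delta, 0.)        # the K.switch equivalent
assert np.isfinite(wh_delta).all()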
Example 72
Project: solder_joint_detection   Author: lx-onism   File: model.py    MIT License 4 votes vote down vote up
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_ 
Example 73
Project: vision-web-service   Author: sherlockchou86   File: model.py    MIT License 4 votes vote down vote up
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_ 
Example 74
Project: KDDCup2019_admin   Author: DominickZhang   File: fm_keras.py    MIT License 4 votes vote down vote up
def call(self, inputs, **kwargs):
        one_hot_feature_index = K.cast(K.slice(inputs, (0, 0), (-1, self.feature_num)), "int32")
        numeric_feature = K.slice(inputs, (0, self.feature_num), (-1, -1))

        ## first order
        first_order_index = K.reshape(one_hot_feature_index, (-1,))
        get_first_order_weights = K.gather(self.w_one_hot, first_order_index)
        first_order_weights = K.reshape(get_first_order_weights, (-1, self.feature_num))

        first_order = K.sum(first_order_weights, 1) + K.sum(K.dot(numeric_feature, self.w_numeric), 1)

        ## second order
        get_second_order_weights = K.gather(self.v_one_hot, first_order_index)
        second_order_weights = K.reshape(get_second_order_weights, (-1, self.feature_num, self.embedding_size))
        numeric_weights = K.expand_dims(self.v_numeric, 0) * K.expand_dims(numeric_feature, -1)

        all_weights = K.concatenate([second_order_weights, numeric_weights], axis=1)
        weights_sum_square = K.sum(K.square(all_weights), 1)
        weights_square_sum = K.square(K.sum(all_weights, 1))
        second_order = 0.5*K.sum(weights_square_sum - weights_sum_square, 1)

        output = first_order + second_order + self.b

        if self.activation is not None:
            output = self.activation(output)
        output = K.expand_dims(output, -1)
        return output



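The second-order term relies on the standard FM identity sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * ((sum_i v_i x_i)^2 - sum_i (v_i x_i)^2), which is exactly what the weights_square_sum - weights_sum_square difference computes per embedding dimension. A NumPy check on random (already x-scaled) embeddings:

import numpy as np

rng = np.random.default_rng(0)
w = rng.normal(size=(5, 3))                     # five features, embedding size 3
pairwise = sum(w[i] @ w[j] for i in range(5) for j in range(i + 1, 5))
identity = 0.5 * (np.square(w.sum(axis=0)) - np.square(w).sum(axis=0)).sum()
assert np.isclose(pairwise, identity)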
Example 75
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        X = inputs[0]
        A = inputs[1]

        outputs = []
        output_attn = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]
            attention_kernel = self.attn_kernels[head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)

            # Compute attention coefficients
            # [[a_1], [a_2]]^T [[Wh_i], [Wh_2]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(features, attention_kernel[0])    # [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(features, attention_kernel[1])  # [a_2]^T [Wh_j]
            if len(K.int_shape(features)) == 2:
                # Single / mixed mode
                attn_for_neighs_T = K.transpose(attn_for_neighs)
            else:
                # Batch mode
                attn_for_neighs_T = K.permute_dimensions(attn_for_neighs, (0, 2, 1))
            attn_coef = attn_for_self + attn_for_neighs_T
            attn_coef = LeakyReLU(alpha=0.2)(attn_coef)

            # Mask values before activation (Vaswani et al., 2017)
            mask = -10e9 * (1.0 - A)
            attn_coef += mask

            # Apply softmax to get attention coefficients
            attn_coef = K.softmax(attn_coef)
            output_attn.append(attn_coef)

            # Apply dropout to attention coefficients
            attn_coef_drop = Dropout(self.dropout_rate)(attn_coef)

            # Convolution
            features = filter_dot(attn_coef_drop, features)
            if self.use_bias:
                features = K.bias_add(features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(features)

        # Aggregate the heads' output according to the reduction method
        if self.concat_heads:
            output = K.concatenate(outputs)
        else:
            output = K.mean(K.stack(outputs), axis=0)

        output = self.activation(output)

        if self.return_attn_coef:
            return output, output_attn
        else:
            return output 
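The two aggregation modes at the end differ only in output width: concatenation keeps all heads side by side as (n_nodes, F' * heads), while averaging collapses them to (n_nodes, F'), the convention the GAT paper suggests for the final layer. A shape-only sketch:

import numpy as np

heads = [np.random.rand(7, 16) for _ in range(8)]  # 8 heads of (n_nodes, F') features
concat = np.concatenate(heads, axis=-1)            # (7, 128), typical for hidden layers
average = np.stack(heads).mean(axis=0)             # (7, 16), typical for the output layer
assert concat.shape == (7, 128) and average.shape == (7, 16)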
Example 76
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 4 votes vote down vote up
def __graph_attention(self, g, x, w, a, n_heads=1):
        """
        using graph attention mechanism.

        Args:
            g: input Tensor of graph adjacency matrix.
               shape: (B(Batch_size), N(N_nodes), N)
            x: input Tensor of node-data after convolutioned.
               shape: (B, N, F_in(F_inputs))
            w: weight matrix variable
               (to transform input to attentionable hidden states.)
               shape: (F_in, F(F_outputs) * H(N_heads))
            a: merge weight vector from attentionable state to attention value.
               shape: (2 * F,)
        """
        H = n_heads
        F_in, FH = w.shape[0], w.shape[1]
        F = FH // H
        N = g.shape[-1]

        # w = K.reshape(F1, H * F2)  # (F_in, H*F)
        x = K.expand_dims(K.dot(x, w), axis=2)  # (B, N, 1, H*F)
        x = K.concatenate([x[:, :, :, F*i:F*(i+1)]
                           for i in range(H)], axis=2)  # (B, N, H, F)

        # concat meshly
        _x1 = K.tile(K.expand_dims(x, axis=1), (1, N, 1, 1, 1))
        _x2 = K.tile(K.expand_dims(x, axis=2), (1, 1, N, 1, 1))
        x = K.concatenate([_x1, _x2], axis=4)  # (B, N, N, H, 2F)

        def _expand_dims_recursive(x, axis_list):
            assert(len(axis_list) > 0)
            if len(axis_list) == 1:
                return K.expand_dims(x, axis_list[0])
            return _expand_dims_recursive(K.expand_dims(x, axis_list[0]),
                                          axis_list=axis_list[1:])
        # squeeze 2F
        a = _expand_dims_recursive(a, (0, 0, 0, 0))
        x = K.exp(K.relu(K.sum(x * a, axis=-1), alpha=0.2))  # (B, N, N, H)

        # normalize by neighbors
        x_norm = K.sum(x * K.expand_dims(g, axis=-1),
                       axis=2, keepdims=True)  # (B, N, 1, H)
        return x / x_norm  # (B, N, N, H) 
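The "concat meshly" step pairs every node with every other node: tiling the (B, N, H, F) features along two different axes and concatenating on the feature axis leaves, at position (i, j), the feature vectors of both nodes of the pair. A NumPy shape check with small made-up sizes:

import numpy as np

B, N, H, F = 1, 4, 2, 3
x = np.random.rand(B, N, H, F)
x1 = np.tile(x[:, None], (1, N, 1, 1, 1))       # (B, N, N, H, F); x1[b, i, j] == x[b, j]
x2 = np.tile(x[:, :, None], (1, 1, N, 1, 1))    # (B, N, N, H, F); x2[b, i, j] == x[b, i]
pairs = np.concatenate([x1, x2], axis=4)        # (B, N, N, H, 2F), one vector per pair
assert pairs.shape == (B, N, N, H, 2 * F)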
Example 77
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 4 votes vote down vote up
def _graph_attention(self, g, x, w, a, n_heads=1):
        """
        using graph attention mechanism.

        Args:
            g: input Tensor of graph adjacency matrix.
               shape: (B(Batch_size), N(N_nodes), N)
            x: input Tensor of node-data after convolutioned.
               shape: (B, N, F_in(F_inputs))
            w: weight matrix variable
               (to transform input to attentionable hidden states.)
               shape: (F_in, F(F_outputs) * H(N_heads))
            a: merge weight vector from attentionable state to attention value.
               shape: (2 * F,)
        """
        H = n_heads
        F_in, FH = w.shape[0], w.shape[1]
        F = FH // H
        N = g.shape[-1]

        # w = K.reshape(F1, H * F2)  # (F_in, H*F)
        x = K.expand_dims(K.dot(x, w), axis=2)  # (B, N, 1, H*F)
        x = K.concatenate([x[:, :, :, F*i:F*(i+1)]
                           for i in range(H)], axis=2)  # (B, N, H, F)

        # concat meshly
        _x1 = K.tile(K.expand_dims(x, axis=1), (1, N, 1, 1, 1))
        _x2 = K.tile(K.expand_dims(x, axis=2), (1, 1, N, 1, 1))
        x = K.concatenate([_x1, _x2], axis=4)  # (B, N, N, H, 2F)

        def _expand_dims_recursive(x, axis_list):
            assert(len(axis_list) > 0)
            if len(axis_list) == 1:
                return K.expand_dims(x, axis_list[0])
            return _expand_dims_recursive(K.expand_dims(x, axis_list[0]),
                                          axis_list=axis_list[1:])
        # squeeze 2F
        a = _expand_dims_recursive(a, (0, 0, 0, 0))
        x = K.exp(K.relu(K.sum(x * a, axis=-1), alpha=0.2))  # (B, N, N, H)

        # normalize by neighbors
        x_norm = K.sum(x * K.expand_dims(g, axis=-1),
                       axis=2, keepdims=True)  # (B, N, 1, H)
        return x / x_norm  # (B, N, N, H) 
Example 78
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 4 votes vote down vote up
def __graph_attention(self, g, x, w, a, n_heads=1):
        """
        using graph attention mechanism.

        Args:
            g: input Tensor of graph adjacency matrix.
               shape: (B(Batch_size), N(N_nodes), N)
            x: input Tensor of node-data after convolutioned.
               shape: (B, N, F_in(F_inputs))
            w: weight matrix variable
               (to transform input to attentionable hidden states.)
               shape: (F_in, F(F_outputs) * H(N_heads))
            a: merge weight vector from attentionable state to attention value.
               shape: (2 * F,)
        """
        H = n_heads
        F_in, FH = w.shape[0], w.shape[1]
        F = FH // H
        N = g.shape[-1]

        # w = K.reshape(F1, H * F2)  # (F_in, H*F)
        x = K.expand_dims(K.dot(x, w), axis=2)  # (B, N, 1, H*F)
        x = K.concatenate([x[:, :, :, F*i:F*(i+1)]
                           for i in range(H)], axis=2)  # (B, N, H, F)

        # concat meshly
        _x1 = K.tile(K.expand_dims(x, axis=1), (1, N, 1, 1, 1))
        _x2 = K.tile(K.expand_dims(x, axis=2), (1, 1, N, 1, 1))
        x = K.concatenate([_x1, _x2], axis=4)  # (B, N, N, H, 2F)

        def _expand_dims_recursive(x, axis_list):
            assert(len(axis_list) > 0)
            if len(axis_list) == 1:
                return K.expand_dims(x, axis_list[0])
            return _expand_dims_recursive(K.expand_dims(x, axis_list[0]),
                                          axis_list=axis_list[1:])
        # squeeze 2F
        a = _expand_dims_recursive(a, (0, 0, 0, 0))
        x = K.exp(K.relu(K.sum(x * a, axis=-1), alpha=0.2))  # (B, N, N, H)

        # normalize by neighbors
        x_norm = K.sum(x * K.expand_dims(g, axis=-1),
                       axis=2, keepdims=True)  # (B, N, 1, H)
        return x / x_norm  # (B, N, N, H) 
Example 79
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 4 votes vote down vote up
def _graph_attention(self, g, x, w, a, n_heads=1):
        """
        using graph attention mechanism.

        Args:
            g: input Tensor of graph adjacency matrix.
               shape: (B(Batch_size), N(N_nodes), N)
            x: input Tensor of node-data after convolutioned.
               shape: (B, N, F_in(F_inputs))
            w: weight matrix variable
               (to transform input to attentionable hidden states.)
               shape: (F_in, F(F_outputs) * H(N_heads))
            a: merge weight vector from attentionable state to attention value.
               shape: (2 * F,)
        """
        H = n_heads
        F_in, FH = w.shape[0], w.shape[1]
        F = FH // H
        N = g.shape[-1]

        # w = K.reshape(F1, H * F2)  # (F_in, H*F)
        x = K.expand_dims(K.dot(x, w), axis=2)  # (B, N, 1, H*F)
        x = K.concatenate([x[:, :, :, F*i:F*(i+1)]
                           for i in range(H)], axis=2)  # (B, N, H, F)

        # concat meshly
        _x1 = K.tile(K.expand_dims(x, axis=1), (1, N, 1, 1, 1))
        _x2 = K.tile(K.expand_dims(x, axis=2), (1, 1, N, 1, 1))
        x = K.concatenate([_x1, _x2], axis=4)  # (B, N, N, H, 2F)

        def _expand_dims_recursive(x, axis_list):
            assert(len(axis_list) > 0)
            if len(axis_list) == 1:
                return K.expand_dims(x, axis_list[0])
            return _expand_dims_recursive(K.expand_dims(x, axis_list[0]),
                                          axis_list=axis_list[1:])
        # squeeze 2F
        a = _expand_dims_recursive(a, (0, 0, 0, 0))
        x = K.exp(K.relu(K.sum(x * a, axis=-1), alpha=0.2))  # (B, N, N, H)

        # normalize by neighbors
        x_norm = K.sum(x * K.expand_dims(g, axis=-1),
                       axis=2, keepdims=True)  # (B, N, 1, H)
        return x / x_norm  # (B, N, N, H) 
Example 80
Project: keras_extension   Author: k1414st   File: merge.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        if not isinstance(inputs, list):
            raise ValueError('A merge layer should be called '
                             'on a list of inputs.')
        if self._reshape_required:
            reshaped_inputs = []
            input_ndims = list(map(K.ndim, inputs))
            if None not in input_ndims:
                # If ranks of all inputs are available,
                # we simply expand each of them at axis=1
                # until all of them have the same rank.
                max_ndim = max(input_ndims)
                for x in inputs:
                    x_ndim = K.ndim(x)
                    for _ in range(max_ndim - x_ndim):
                        x = K.expand_dims(x, 1)
                    reshaped_inputs.append(x)
                return self._merge_function(reshaped_inputs)
            else:
                # Transpose all inputs so that batch size is the last dimension.
                # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
                transposed = False
                for x in inputs:
                    x_ndim = K.ndim(x)
                    if x_ndim is None:
                        x_shape = K.shape(x)
                        batch_size = x_shape[0]
                        new_shape = K.concatenate(
                            [x_shape[1:], K.expand_dims(batch_size)])
                        x_transposed = K.reshape(x, K.stack(
                            [batch_size, K.prod(x_shape[1:])]))
                        x_transposed = K.permute_dimensions(x_transposed, (1, 0))
                        x_transposed = K.reshape(x_transposed, new_shape)
                        reshaped_inputs.append(x_transposed)
                        transposed = True
                    elif x_ndim > 1:
                        dims = list(range(1, x_ndim)) + [0]
                        reshaped_inputs.append(K.permute_dimensions(x, dims))
                        transposed = True
                    else:
                        # We don't transpose inputs if they are 1D vectors or scalars.
                        reshaped_inputs.append(x)
                y = self._merge_function(reshaped_inputs)
                y_ndim = K.ndim(y)
                if transposed:
                    # If inputs have been transposed, we have to transpose the output too.
                    if y_ndim is None:
                        y_shape = K.shape(y)
                        y_ndim = K.shape(y_shape)[0]
                        batch_size = y_shape[y_ndim - 1]
                        new_shape = K.concatenate(
                            [K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
                        y = K.reshape(y, (-1, batch_size))
                        y = K.permute_dimensions(y, (1, 0))
                        y = K.reshape(y, new_shape)
                    elif y_ndim > 1:
                        dims = [y_ndim - 1] + list(range(y_ndim - 1))
                        y = K.permute_dimensions(y, dims)
                return y
        else:
            return self._merge_function(inputs)