Python keras.backend.tile() Examples

The following code examples show how to use keras.backend.tile(). They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down those you don't.

Example 1
Project: deep-models   Author: LaurentMazare   File: rhn.py    Apache License 2.0 6 votes vote down vote up
def get_constants(self, x):
    """Build the dropout mask constants [B_U, B_W] for the recurrent step.

    Each entry is a list of 3 masks (one per internal transform), or a list
    of scalar 1.0 constants when the corresponding dropout rate is inactive.
    """
    def _masks(rate, dim):
        # (samples, 1) ones tiled to (samples, dim); dropout applies only
        # during the training phase.  A fresh mask is drawn per list entry.
        base = K.tile(K.ones_like(K.reshape(x[:, 0, 0], (-1, 1))), (1, dim))
        return [K.in_train_phase(K.dropout(base, rate), base) for _ in range(3)]

    constants = []
    if 0 < self.dropout_U < 1:
        constants.append(_masks(self.dropout_U, self.output_dim))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        constants.append(_masks(self.dropout_W, self.input_spec[0].shape[-1]))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example 2
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Transform input capsules into output capsules via dynamic routing.

    inputs: (batch_size, input_num_capsule, input_dim_capsule)
    returns: (batch_size, num_capsule, dim_capsule) squashed capsule outputs.
    """
    # (batch_size, 1, input_num_capsule, input_dim_capsule)
    expand_inputs = K.expand_dims(inputs, axis=1)
    # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
    expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
    # Prediction vectors ("votes"):
    # (batch_size, num_capsule, input_num_capsule, dim_capsule)
    u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

    if self.num_routing <= 0:
        self.num_routing = 3
    # Routing logits: (batch_size, num_capsule, input_num_capsule)
    b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
    # FIX: `xrange` is Python 2 only; use `range` so this runs on Python 3.
    for i in range(self.num_routing):
        # Coupling coefficients: (batch_size, num_capsule, input_num_capsule)
        c = softmax(b, axis=1)
        # Weighted sum of votes: (batch_size, num_capsule, dim_capsule)
        s = K.batch_dot(c, u_hat, axes=[2, 2])
        squashed_s = squash(s)
        if i < self.num_routing - 1:
            # Agreement update: (batch_size, num_capsule, input_num_capsule)
            b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
    return squashed_s
Example 3
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 6 votes vote down vote up
def get_initial_states(self, inputs):
    """Build the initial states [h, d, n, a_max] for the recurrence.

    All states are symbolic zeros of shape (samples, units), except the last
    (a_max), which is offset to a very large negative value so the first real
    activation always wins the running maximum.
    """
    def _zero_state():
        # Symbolic all-zero tensor of shape (samples, units), derived from
        # `inputs` so the batch dimension stays dynamic.
        state = K.zeros_like(inputs)           # (samples, timesteps, input_dim)
        state = K.sum(state, axis=(1, 2))      # (samples,)
        state = K.expand_dims(state)           # (samples, 1)
        return K.tile(state, [1, self.units])  # (samples, units)

    initial_states = [_zero_state() for _ in range(len(self.states) - 1)]

    a_max_state = _zero_state()
    dtype = a_max_state.dtype.name
    # FIX: np.asscalar was deprecated and removed in NumPy 1.23; use
    # ndarray.item() to extract the Python scalar instead.
    min_value = np.array([1E38]).astype(dtype).item()
    initial_states.append(a_max_state - min_value)
    return initial_states
Example 4
Project: group-ksparse-temporal-cnns   Author: srph25   File: ops.py    MIT License 6 votes vote down vote up
def diff(a, n=1, axis=-1):
    """n-th order discrete difference of tensor `a` along `axis`.

    Keras-backend analogue of numpy.diff: the first-order difference is
    applied `n` times by running K.rnn whose step function re-differences
    the previous step's output.

    :param a: input tensor.
    :param n: number of times the difference is applied (default 1).
    :param axis: axis to difference along; -1 means the last axis.
    """
    if axis == -1:
        axis = K.ndim(a) - 1
    # Replicate `a` n times along a new axis 1; K.rnn iterates over that
    # axis, so `step` runs exactly n times.
    a_aug = K.tile(K.expand_dims(a, axis=1), [1, n] + [1] * (K.ndim(a) - 1))
    # `pattern` brings `axis` to the front; `inv_pattern` undoes it.
    # NOTE(review): relies on set iteration yielding the remaining axes in
    # ascending order — true in CPython for these small ints, but fragile.
    pattern = (axis,) + tuple(set(range(K.ndim(a))) - {axis})
    inv_pattern = tuple(range(1, axis + 1)) + (0,) + tuple(range(axis + 1, K.ndim(a)))
    def step(inputs, states):
        # states: [previous output, scalar step counter t]
        prev_output = states[0]
        t = states[1]
        t_int = K.cast(t[0], 'int32')
        prev_output_aug = K.permute_dimensions(prev_output, pattern)
        inputs_aug = K.permute_dimensions(inputs, pattern)
        # First step (t == 0): difference the raw inputs; later steps:
        # difference the previous output again (higher-order difference).
        # The leading slice keeps the tensor length constant across steps.
        output_aug = K.switch(K.all(t_int > 0),
                              K.concatenate([inputs_aug[:t_int], prev_output_aug[1:] - prev_output_aug[:-1]], axis=0),
                              K.concatenate([inputs_aug[:1], inputs_aug[1:] - inputs_aug[:-1]], axis=0))
        output = K.permute_dimensions(output_aug, inv_pattern)
        return output, [output, t + 1]
    # Take the final K.rnn output, then keep only the trailing
    # (shape[axis] - n) entries along `axis` to drop the padding rows.
    d_aug = K.permute_dimensions(K.rnn(step, a_aug, [K.zeros_like(a), K.zeros((1,))])[0], pattern)
    d = K.permute_dimensions(d_aug[-(K.shape(a)[axis] - n):], inv_pattern)
    return d
Example 5
Project: group-ksparse-temporal-cnns   Author: srph25   File: ops.py    MIT License 6 votes vote down vote up
def cumsum(a, n=1, axis=-1):
    """n-fold cumulative sum of tensor `a` along `axis`.

    Applies K.cumsum `n` times by running K.rnn whose step function
    re-accumulates the previous step's output.

    :param a: input tensor.
    :param n: number of times the cumulative sum is applied (default 1).
    :param axis: axis to accumulate along; -1 means the last axis.
    """
    if axis == -1:
        axis = K.ndim(a) - 1
    # Replicate `a` n times along a new axis 1; K.rnn iterates over that
    # axis, so `step` runs exactly n times.
    a_aug = K.tile(K.expand_dims(a, axis=1), [1, n] + [1] * (K.ndim(a) - 1))
    # `pattern` brings `axis` to the front; `inv_pattern` undoes it.
    # NOTE(review): relies on set iteration yielding the remaining axes in
    # ascending order — true in CPython for these small ints, but fragile.
    pattern = (axis,) + tuple(set(range(K.ndim(a))) - {axis})
    inv_pattern = tuple(range(1, axis + 1)) + (0,) + tuple(range(axis + 1, K.ndim(a)))
    def step(inputs, states):
        # states: [previous output, scalar step counter t]
        prev_output = states[0]
        t = states[1]
        t_int = K.cast(t[0], 'int32')
        prev_output_aug = K.permute_dimensions(prev_output, pattern)
        inputs_aug = K.permute_dimensions(inputs, pattern)
        # First step (t == 0): cumsum the raw inputs; later steps: cumsum
        # the previous output again (higher-order accumulation).
        output_aug = K.switch(K.all(t_int > 0), K.cumsum(prev_output_aug, axis=0), K.cumsum(inputs_aug, axis=0))
        output = K.permute_dimensions(output_aug, inv_pattern)
        return output, [output, t + 1]
    # Take the final K.rnn output, then keep only the trailing
    # (shape[axis] - n) entries along `axis`.
    c_aug = K.permute_dimensions(K.rnn(step, a_aug, [K.zeros_like(a), K.zeros((1,))])[0], pattern)
    c = K.permute_dimensions(c_aug[-(K.shape(a)[axis] - n):], inv_pattern)
    return c
Example 6
Project: keras_bn_library   Author: bnsnapper   File: rnnrbm.py    MIT License 6 votes vote down vote up
def get_constants(self, x):
    """Return the dropout constants [B_U, B_W] for the RNN-RBM step."""
    def _mask(rate, dim):
        # Ones of shape (samples, dim); dropout is applied only in the
        # training phase.
        ones = K.tile(K.ones_like(K.reshape(x[:, 0, 0], (-1, 1))), (1, dim))
        return K.in_train_phase(K.dropout(ones, rate), ones)

    constants = []
    if 0 < self.dropout_U < 1:
        constants.append(_mask(self.dropout_U, self.hidden_recurrent_dim))
    else:
        constants.append(K.cast_to_floatx(1.))

    # The input-dropout mask is only built for the cpu-oriented
    # implementation, matching the guard in the original code.
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        constants.append(_mask(self.dropout_W, self.input_spec[0].shape[-1]))
    else:
        constants.append(K.cast_to_floatx(1.))

    return constants
Example 7
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License 6 votes vote down vote up
def get_constants(self, x):
    """Return [B_U, B_W]: recurrent and input dropout masks (4 gates each)."""
    def _mask_set(rate, dim):
        base = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, int(dim)))
        return [K.in_train_phase(K.dropout(base, rate), base) for _ in range(4)]

    constants = []
    if 0 < self.dropout_U < 1:
        # NOTE(review): the recurrent mask is tiled to input_dim here rather
        # than a units/output dimension — preserved exactly as in the original.
        constants.append(_mask_set(self.dropout_U, self.input_dim))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.dropout_W < 1:
        constants.append(_mask_set(self.dropout_W, K.int_shape(x)[-1]))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example 8
Project: yoctol-keras-layer-zoo   Author: Yoctol   File: rnn_cell.py    GNU General Public License v3.0 6 votes vote down vote up
def get_constants(self, inputs, training=None):
    """Extend the wrapped recurrent layer's constants with a dense-dropout mask."""
    constants = self.recurrent_layer.get_constants(
        inputs=inputs,
        training=training
    )

    if 0 < self.dense_dropout < 1:
        # Ones of shape (samples, units) for the dense projection.
        ones = K.tile(K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1))),
                      (1, self.recurrent_layer.units))

        def dropped_inputs():
            return K.dropout(ones, self.dense_dropout)

        constants.append([K.in_train_phase(dropped_inputs, ones,
                                           training=training)])
    else:
        constants.append([K.cast_to_floatx(1.)])

    return constants
Example 9
Project: NTM-Keras   Author: SigmaQuan   File: lstm2ntm.py    MIT License 6 votes vote down vote up
def get_constants(self, x):
    """Return [B_U, B_W] dropout masks for the 4 LSTM gates."""
    def _gate_masks(rate, dim):
        # (samples, dim) ones with training-phase dropout; a fresh mask is
        # drawn for each of the 4 gates.
        ones = K.tile(K.ones_like(K.reshape(x[:, 0, 0], (-1, 1))), (1, int(dim)))
        return [K.in_train_phase(K.dropout(ones, rate), ones) for _ in range(4)]

    constants = []
    if 0 < self.dropout_U < 1:
        constants.append(_gate_masks(self.dropout_U, self.output_dim))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.dropout_W < 1:
        constants.append(_gate_masks(self.dropout_W, self.input_spec[0].shape[-1]))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example 10
Project: pointnet2-keras   Author: HarborZeng   File: model_cls.py    MIT License 6 votes vote down vote up
def set_abstraction_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, use_nchw):
    """Multi-scale-grouping set abstraction (PointNet++-style, per the names).

    Samples `npoint` centroids from `xyz`, groups neighbours at several radii,
    runs a Conv2D/BatchNorm MLP per scale, max-pools over each group and
    concatenates the per-scale features.

    :param xyz: point coordinate tensor.
    :param points: per-point feature tensor, or None to use coordinates only.
    :param npoint: number of centroids to sample.
    :param radius_list: grouping radius per scale.
    :param nsample_list: neighbours per group, per scale.
    :param mlp_list: per-scale lists of Conv2D output channel counts.
    :param is_training: passed to BatchNormalization as `training`.
    :param use_nchw: if True, transpose to channels-first around the MLP.
    """
    # Sample npoint centroids via farthest point sampling.
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    new_points_list = []
    for i in range(len(radius_list)):
        radius = radius_list[i]
        nsample = nsample_list[i]
        group_idx = query_ball_point(radius, nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, group_idx[0])
        # Translate each group into its centroid's local coordinate frame.
        grouped_xyz -= K.tile(Lambda(lambda x: K.expand_dims(x, axis=2))(new_xyz), [1, 1, nsample, 1])
        if points is not None:
            grouped_points = group_point(points, group_idx[0])
            grouped_points = Lambda(lambda x: K.concatenate(x, axis=-1))([grouped_points, grouped_xyz])
        else:
            grouped_points = grouped_xyz
        if use_nchw: grouped_points = Lambda(lambda x: K.permute_dimensions(x, [0, 3, 1, 2]))(grouped_points)
        for j, num_out_channel in enumerate(mlp_list[i]):
            grouped_points = Conv2D(num_out_channel, 1, activation="relu")(grouped_points)
            grouped_points = BatchNormalization()(grouped_points, training=is_training)
        if use_nchw: grouped_points = Lambda(lambda x: K.permute_dimensions(x, [0, 2, 3, 1]))(grouped_points)
        # Max-pool over the nsample neighbours of each group.
        new_points = Lambda(lambda x: K.max(x, axis=2))(grouped_points)
        new_points_list.append(new_points)
    # Concatenate features from every scale.
    new_points_concat = Lambda(lambda x: K.concatenate(x, axis=-1))(new_points_list)
    return new_xyz, new_points_concat
Example 11
Project: textcaps   Author: vinojjayasundara   File: capsulelayers.py    MIT License 6 votes vote down vote up
def call(self, inputs, training=None):
        """Dynamic routing between capsules with channel-shared weights.

        inputs: (batch_size, input_num_capsule, input_dim_capsule)
        returns: (batch_size, num_capsule, dim_capsule) routed capsule outputs.
        """
        # (batch_size, 1, input_num_capsule, input_dim_capsule)
        inputs_expand = K.expand_dims(inputs, 1)

        # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # When channels != 0, one weight block per channel is repeated across
        # the (input_num_capsule / channels) capsules of that channel.
        if(self.channels!=0):
            W2 = K.repeat_elements(self.W,int(self.input_num_capsule/self.channels),1)
        else:
            W2 = self.W

        # Prediction vectors ("votes") for every (input, output) capsule pair.
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, W2, [2, 3]) , elems=inputs_tiled)

        # Routing logits, reset at every forward pass.
        b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

        assert self.routings > 0, 'The routings should be > 0.'
        for i in range(self.routings):
            # NOTE(review): tf.nn.softmax's `dim` argument was renamed `axis`
            # in later TF releases; this targets the older TF1 API.
            c = tf.nn.softmax(b, dim=1)
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2])+ self.B)

            if i < self.routings - 1:
                # Agreement update between outputs and votes.
                b += K.batch_dot(outputs, inputs_hat, [2, 3])

        return outputs
Example 12
Project: RPGOne   Author: RTHMaK   File: backend.py    Apache License 2.0 6 votes vote down vote up
def hardmax(unnormalized_attention, knowledge_length):
    """One-hot analogue of softmax: all weight goes to the mode.

    E.g. this transforms [.34, .2, -1.4] -> [1, 0, 0].

    TODO(matt): we really should have this take an optional mask...
    """
    # Broadcast each row's max across all knowledge_length columns, giving a
    # matrix whose every column holds that row's maximum score.
    row_max = K.max(unnormalized_attention, axis=1, keepdims=True)
    row_max = K.tile(row_max, (1, knowledge_length))
    # 1 where an entry equals its row max, 0 elsewhere; cast to float for
    # TensorFlow compatibility.  Shape: (batch_size, knowledge_length).
    return K.cast(K.equal(unnormalized_attention, row_max), 'float32')
Example 13
Project: RPGOne   Author: RTHMaK   File: tree_composition_lstm.py    Apache License 2.0 6 votes vote down vote up
def get_initial_states(self, inputs):
    """Build [init_buffer, init_stack] for the TreeLSTM.

    `inputs` concatenates the transitions with the initial buffer:
    (batch_size, buffer_limit, output_dim + 1).  The first feature column
    holds the transitions and is stripped off here; the rest initializes the
    buffer state.
    """
    # Initial buffer: h comes from the input, c is all zeros; each buffer
    # element is the concatenation [h, c] for the corresponding node.
    buffer_h = inputs[:, :, 1:]        # (batch_size, buffer_limit, output_dim)
    buffer_c = K.zeros_like(buffer_h)
    init_buffer = K.concatenate([buffer_h, buffer_c], axis=-1)

    # We need symbolic zeros of shape (samples, stack_limit, 2*output_dim),
    # but the samples dimension is a placeholder.  Trick: collapse `inputs`
    # to a (samples,) zero vector, tile it up, then move the sample axis back
    # to the front.
    zeros = K.sum(K.zeros_like(inputs), axis=(1, 2))             # (samples,)
    tiled = K.tile(zeros,
                   (self.stack_limit, 2*self.output_dim, 1))     # (stack_limit, 2*output_dim, samples)
    init_stack = K.permute_dimensions(tiled, (2, 0, 1))          # (samples, stack_limit, 2*output_dim)
    return [init_buffer, init_stack]
Example 14
Project: world_models   Author: llSourcell   File: arch.py    MIT License 6 votes vote down vote up
def tf_normal(y_true, mu, sigma, pi):
    """Per-component Gaussian-mixture density of y_true, summed over mixtures."""
    rollout_length = K.shape(y_true)[1]
    # Repeat targets once per mixture component:
    # -> (-1, rollout_length, GAUSSIAN_MIXTURES, Z_DIM)
    y = K.tile(y_true, (1, 1, GAUSSIAN_MIXTURES))
    y = K.reshape(y, [-1, rollout_length, GAUSSIAN_MIXTURES, Z_DIM])

    inv_sqrt_two_pi = 1 / math.sqrt(2 * math.pi)
    inv_sigma = 1 / (sigma + 1e-8)

    # Standard normal pdf evaluated at the standardized residual.
    z = (y - mu) * inv_sigma
    density = K.exp(-K.square(z) / 2) * inv_sigma * inv_sqrt_two_pi

    # Weight by mixture coefficients and sum over the gaussians (axis 2).
    return K.sum(density * pi, axis=2)
Example 15
Project: dynamic_memory_networks_with_keras   Author: vchudinov   File: attention_cells.py    GNU General Public License v3.0 6 votes vote down vote up
def get_initial_state(self, inputs):
    """Return the initial hidden state: zeros of shape (batch_size, units).

    `inputs` is accepted for interface compatibility but unused — the state
    shape is fully determined by the layer configuration.
    """
    # Unreachable commented-out experimentation that followed this return
    # has been removed.
    return K.zeros(shape=[self.batch_size, self.units])
Example 16
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: model_Mobilenet.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    :param feats: conv output of shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
    :param anchors: list of (width, height) anchor pairs.
    :param num_classes: number of object classes.
    :param input_shape: network input (height, width) used to normalize wh.
    :param calc_loss: if True, return (grid, feats, box_xy, box_wh) for the loss.
    :return: box_xy, box_wh, box_confidence, box_class_probs (all normalized).
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    # Split the flat anchor*params channel into separate anchor/params axes.
    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example 17
Project: smach_based_introspection_framework   Author: birlrobotics   File: layer_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_constants(self, inputs, training=None):
    """Return [dp_mask, rec_dp_mask, inputs] constants for the step function."""
    def _mask_set(rate, dim):
        # (samples, dim) ones; dropout applies only in the training phase.
        # One independent mask per gate (4 total).
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(dim)))

        def dropped():
            return K.dropout(ones, rate)

        return [K.in_train_phase(dropped, ones, training=training)
                for _ in range(4)]

    constants = []
    if self.implementation != 0 and 0 < self.dropout < 1:
        constants.append(_mask_set(self.dropout, K.int_shape(inputs)[-1]))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.recurrent_dropout < 1:
        constants.append(_mask_set(self.recurrent_dropout, self.units))
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    # append the input as well for use later
    constants.append(inputs)
    return constants
Example 18
Project: CapsAttnNet   Author: rstager   File: canlayer.py    MIT License 5 votes vote down vote up
def _part_to_whole_predictions(self, x):
        """
        Estimate the pose of the whole given the pose of the part.
        :param x: set of poses to transform
        :return: predicted whole poses, shape
                 [num_instance*num_capsule, num_part*input_num_capsule*input_num_instance, dim_capsule]
        """
        # inputs.shape=[ input_num_capsule, input_num_instance, input_dim_capsule]
        # output.shape=[num_instance*num_capsule, num_parts*input_num_capsule*input_num_instance,dim_capsule]
        # xt.shape = [ input_num_capsule, num_instance, input_num_instance, input_dim_capsule]
        # xpart.shape = [ num_instance, input_num_instance, num_capsule, num_part, dim_x,input_num_capsule]
        # gpose.shape = [ input_num_capsule, num_instance, input_num_instance, dim_geom+1]
        # Replicate the poses once per output instance.
        xt = K.tile(K.expand_dims(x,1),[1,self.num_instance,1,1])

        # Probability channel (first pose element), broadcast to every
        # (capsule, part) pair.
        tmp = K.reshape( xt[:,:,:,:1],[self.input_num_capsule,self.num_instance,self.input_num_instance,1,1,1])
        tmp = K.tile(tmp,[1,1,1,self.num_capsule,self.num_part,1])
        ppart=K.permute_dimensions(tmp,[1,2,3,4,5,0])

        # Geometric pose: append a ones column so W1 can encode x-y translation
        # as an affine transform.
        gpose = K.concatenate([xt[:,:,:,1:dim_geom+1],K.ones_like(xt[:,:,:,:1])]) # add 1 col to allow x-y translate
        # Per-input-capsule transforms: W1 for geometry, W2 for attributes.
        gpart = K.concatenate([K.expand_dims(K.dot(gpose[i],self.W1[i]),-1) for i in range(self.input_num_capsule)])
        apart = K.concatenate([K.expand_dims(K.dot(xt[i,:,:,dim_geom+1:],self.W2[i]),-1) for i in range(self.input_num_capsule)])
        # Reassemble [probability, geometry, attributes] and flatten to the
        # routing shape documented above.
        whole=K.concatenate([ppart,gpart,apart],4)
        output=K.permute_dimensions(whole,[0,2,3,5,1,4])
        output=K.reshape(output,[self.num_instance*self.num_capsule,
                                 self.num_part*self.input_num_capsule*self.input_num_instance,self.dim_capsule])
        return output
Example 19
Project: CapsAttnNet   Author: rstager   File: canlayer.py    MIT License 5 votes vote down vote up
def _best_guess(self, c, inputs_hat):
    '''
    Combine the predicted poses 'inputs_hat' weighted by c to come up with
    a best guess of the capsule poses.

    :param c: weights to apply to the input poses
    :param inputs_hat: input poses
    :return: best guess at pose
    '''
    # c.shape=[None, num_capsule * num_instance, num_part * input_num_capsule * input_num_instance]
    # inputs_hat.shape = [None,num_instance * num_capsule, num_parts, input_num_capsule * input_num_instance, dim_capsule]
    # guess.shape = [None,num_instance * num_capsule,dim_capsule]

    # Mean weighted geometric pose: weighted sum of the geometry channels,
    # divided by the total routing weight.
    sum_weighted_geoms = K.batch_dot(c, inputs_hat[:, :, :, 1:dim_geom+1], [2, 2])
    one_over_weight_sums = tf.tile(
        tf.expand_dims(tf.reciprocal(K.sum(c, axis=-1)), -1), [1, 1, dim_geom])
    mean_geom = one_over_weight_sums * sum_weighted_geoms

    # Weighted sum of the attribute channels.
    weighted_attrs = K.batch_dot(c, inputs_hat[:, :, :, dim_geom+1:], [2, 2])

    # Use the magnitude of the squashed weighted attribute sum as the
    # probability.  (FIX: the original first computed a mean-probability via
    # tf.reduce_mean and immediately overwrote it here — dead code removed.)
    probability = squash_scale(weighted_attrs)

    guess = layers.concatenate([probability, mean_geom, weighted_attrs])
    return guess
Example 20
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License 5 votes vote down vote up
def _additive_similarity(self, source, query):
    """Additive (Bahdanau-style) similarity between source and query steps."""
    combined = K.concatenate([source, query], axis=2)
    activated = K.tanh(K.dot(combined, self._weights["w_a"]))

    # Tile the (1, 1, dim) weight vector for each time step and each element
    # of the batch -> (bs, T, dim).
    src_shape = K.shape(source)
    v = K.tile(K.expand_dims(self._weights["v_a"], 0),
               [src_shape[0], src_shape[1], 1])

    return K.batch_dot(K.permute_dimensions(v, [0, 2, 1]), activated,
                       axes=[1, 2])
Example 21
Project: keras-minimal-rnn   Author: titu1994   File: minimal_rnn.py    MIT License 5 votes vote down vote up
def _generate_recurrent_dropout_mask(self, inputs, training=None):
    """Create the recurrent dropout masks for this call.

    Stores a list of 2 masks in self._recurrent_dropout_mask, or None when
    recurrent dropout is disabled.
    """
    if 0 < self.recurrent_dropout < 1:
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))

        def dropped_inputs():
            # FIX: use the recurrent dropout rate.  The original applied
            # self.dropout here even though the enclosing guard checks
            # self.recurrent_dropout — an apparent copy-paste error.
            return K.dropout(ones, self.recurrent_dropout)

        self._recurrent_dropout_mask = [K.in_train_phase(
            dropped_inputs,
            ones,
            training=training)
            for _ in range(2)]
    else:
        self._recurrent_dropout_mask = None
Example 22
Project: ndsc_code_gakko_workshop   Author: seansaito   File: localize_image.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """
    Convert final layer features to bounding box parameters.
    """
    num_anchors = len(anchors)
    # (1, 1, 1, num_anchors, 2) so anchors broadcast over the spatial grid.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # (height, width) of the conv output
    # Per-cell (x, y) offsets, shaped (grid_h, grid_w, 1, 2).
    rows = K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1])
    cols = K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1])
    grid_y = K.tile(rows, [1, grid_shape[1], 1, 1])
    grid_x = K.tile(cols, [grid_shape[0], 1, 1, 1])
    grid = K.cast(K.concatenate([grid_x, grid_y]), K.dtype(feats))

    # Split the flat channel axis into anchors and per-box parameters.
    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    dtype = K.dtype(feats)

    # Adjust predictions to each spatial grid point and anchor size:
    # sigmoid xy plus cell offset (normalized by grid size), exp wh scaled by
    # anchors (normalized by input size).
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], dtype)
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], dtype)
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    return box_xy, box_wh, box_confidence, box_class_probs
Example 23
Project: keras-yolo3   Author: bing0037   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    :param feats: conv output of shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
    :param anchors: list of (width, height) anchor pairs.
    :param num_classes: number of object classes.
    :param input_shape: network input (height, width) used to normalize wh.
    :param calc_loss: if True, return (grid, feats, box_xy, box_wh) for the loss.
    :return: box_xy, box_wh, box_confidence, box_class_probs (all normalized).
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of per-cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    # Split the flat anchor*params channel into separate anchor/params axes.
    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example 24
Project: keras-attention-augmented-convs   Author: titu1994   File: attn_augconv.py    MIT License 5 votes vote down vote up
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
        """Relative-position logits along one spatial dimension.

        q: query tensor indexed as (b, h, x, y, d) per the einsum spec below.
        rel_k: (2W - 1, d) relative-position embedding.
        transpose_mask: permutation applied before the final flatten.
        Returns logits of shape (batch, num_heads, H*W, H*W).
        """
        # Dot each query with every relative embedding -> (..., 2W - 1).
        rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
        # Convert relative indexing to absolute indexing.
        rel_logits = self.rel_to_abs(rel_logits)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
        # Broadcast the logits over the other spatial dimension.
        rel_logits = K.expand_dims(rel_logits, axis=3)
        rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])
        # Reorder axes per transpose_mask, then flatten to attention shape.
        rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
        return rel_logits
Example 25
Project: trVAE   Author: theislab   File: _utils.py    MIT License 5 votes vote down vote up
def compute_kernel(x, y, kernel='rbf', **kwargs):
    """
        Computes a kernel matrix between x and y ('rbf', 'raphy' or
        'multi-scale-rbf').
        # Parameters
            x: Tensor
                Tensor with shape [batch_size, z_dim]
            y: Tensor
                Tensor with shape [batch_size, z_dim]
            kernel: str
                Which kernel variant to compute.
            scales: list (kwargs, 'raphy' only)
                Bandwidth scales for the 'raphy' kernel.
        # Returns
            returns the computed kernel between x and y
    """
    scales = kwargs.get("scales", [])
    if kernel == "rbf":
        x_size = K.shape(x)[0]
        y_size = K.shape(y)[0]
        dim = K.shape(x)[1]
        # Tile x and y against each other to form all (x_i, y_j) pairs, then
        # average the squared differences over the feature dimension.
        tiled_x = K.tile(K.reshape(x, K.stack([x_size, 1, dim])), K.stack([1, y_size, 1]))
        tiled_y = K.tile(K.reshape(y, K.stack([1, y_size, dim])), K.stack([x_size, 1, 1]))
        return K.exp(-K.mean(K.square(tiled_x - tiled_y), axis=2) / K.cast(dim, tf.float32))
    elif kernel == 'raphy':
        # Weighted sum of RBF kernels at the user-supplied `scales`.
        scales = K.variable(value=np.asarray(scales))
        squared_dist = K.expand_dims(squared_distance(x, y), 0)
        scales = K.expand_dims(K.expand_dims(scales, -1), -1)
        # NOTE(review): `weights` is the evaluated *count* of scales wrapped
        # in a variable, so every scale gets the same scalar weight — confirm
        # this is the intended weighting.
        weights = K.eval(K.shape(scales)[0])
        weights = K.variable(value=np.asarray(weights))
        weights = K.expand_dims(K.expand_dims(weights, -1), -1)
        return K.sum(weights * K.exp(-squared_dist / (K.pow(scales, 2))), 0)
    elif kernel == "multi-scale-rbf":
        sigmas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6]

        # Average of RBF kernels over a fixed ladder of bandwidths.
        beta = 1. / (2. * (K.expand_dims(sigmas, 1)))
        distances = squared_distance(x, y)
        s = K.dot(beta, K.reshape(distances, (1, -1)))

        return K.reshape(tf.reduce_sum(tf.exp(-s), 0), K.shape(distances)) / len(sigmas)
Example 26
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters.

    :param feats: conv output of shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
    :param anchors: list of (width, height) anchor pairs.
    :param num_classes: number of object classes.
    :param input_shape: network input (height, width) used to normalize wh.
    :return: box_xy, box_wh, box_confidence, box_class_probs (all normalized).
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of per-cell (x, y) offsets.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    # Split the flat anchor*params channel into separate anchor/params axes.
    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs
Example 27
Project: solder_joint_detection   Author: lx-onism   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 28
Project: vision-web-service   Author: sherlockchou86   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 29
Project: gccaps   Author: tqbl   File: capsules.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
        """Apply transformation followed by capsule routing.

        Args:
            inputs: previous-layer capsule outputs of shape
                (batch, n_input_capsules, dim_input_capsules).
            training: unused here (no train/test-phase branch in this body).

        Returns:
            The routed capsule outputs from the final squash step.
        """
        # Create dimension for output capsules and tile along this dim
        # (None, *n_capsules*, n_input_capsules, dim_input_capsules)
        inputs_tiled = K.tile(K.expand_dims(inputs, 1),
                              [1, self.n_capsules, 1, 1])

        # Apply linear transformation to compute prediction vectors
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]),
                              elems=inputs_tiled)
        # Add bias to prediction vectors if specified
        if self.use_bias:
            inputs_hat = K.bias_add(inputs_hat, self.bias,
                                    data_format='channels_first')

        # Initialize routing logit variables to zero
        b = K.zeros(shape=[K.shape(inputs_hat)[0],
                           self.n_capsules,
                           self.n_input_capsules])

        # Apply dynamic routing algorithm
        for i in range(self.routings):
            # Compute coupling coefficients (softmax over the capsule axis)
            c = K.softmax(b, axis=1)
            # Apply squashing function
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))
            # Update logits by computing agreement (skipped on last iteration)
            if i < self.routings - 1:
                b += K.batch_dot(outputs, inputs_hat, [2, 3])

        return outputs 
Example 30
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 5 votes vote down vote up
def get_initial_state(self, inputs):
    """Build an all-zero initial hidden state.

    Takes `inputs` of shape (N, nodes, input_dim) and returns zeros of
    shape (N, nodes, self.cell.units).
    """
    zeros = K.expand_dims(K.zeros_like(inputs)[:, :, 0], axis=2)
    return K.tile(zeros, [1, 1, self.cell.units])
Example 31
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 5 votes vote down vote up
def get_initial_state(self, inputs):
    """Return an all-zero state of shape (N, nodes, self.cell.units)."""
    # Collapse the feature axis of `inputs` to a single zero channel...
    base = K.zeros_like(inputs)
    base = base[:, :, 0]
    base = K.expand_dims(base, axis=2)
    # ...then broadcast that channel across the cell's hidden units.
    tiled = K.tile(base, [1, 1, self.cell.units])
    return tiled
Example 32
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 5 votes vote down vote up
def get_initial_state(self, inputs):
    """Zero-valued initial state shaped (N, nodes, self.cell.units)."""
    # Zeros with the batch/node dims of `inputs` and a singleton channel,
    # widened to the hidden dimension.
    single_channel = K.expand_dims(K.zeros_like(inputs[:, :, 0]), axis=2)
    return K.tile(single_channel, [1, 1, self.cell.units])
Example 33
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, n):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, conv_h, conv_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        n: detection-scale index (0/1/2) selecting the divisor 32/16/8
            below -- presumably that scale's stride; TODO confirm.

    Returns:
        [box_xy, box_wh, box_confidence, box_class_probs].
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the inner most iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    # Pair every cell's (height, width) index and reshape so it broadcasts
    # against the per-cell predictions below.
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    # Per-cell raw activations: xy offsets, wh scales, objectness, class scores.
    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    # TODO: It works with +1, don't know why.
    box_xy = (box_xy + conv_index + 1) / conv_dims
    # TODO: Input layer size
    box_wh = box_wh * anchors_tensor / conv_dims / {0:32, 1:16, 2:8}[n]

    return [box_xy, box_wh, box_confidence, box_class_probs] 
Example 34
Project: keras_bn_library   Author: bnsnapper   File: rnnrbm.py    MIT License 5 votes vote down vote up
def get_initial_states(self, x):
    """Build all-zero initial states, one per entry in self.states_dim.

    Args:
        x: input tensor of shape (samples, timesteps, input_dim); only its
            batch dimension is used.

    Returns:
        A list of zero tensors, one of shape (samples, dim) for each
        dim in self.states_dim.
    """
    # Reduce x to a (samples, 1) column of zeros so the batch dimension is
    # preserved even when the batch size is dynamic.
    # (Removed leftover debug print and dead commented-out code.)
    zeros = K.expand_dims(K.sum(K.zeros_like(x), axis=(1, 2)))
    # Broadcast the zero column to each state's width.
    return [K.tile(zeros, [1, dim]) for dim in self.states_dim]
Example 35
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License 5 votes vote down vote up
def get_initial_states(self, x):
    """Return one all-zero (samples, input_dim) state per entry of self.states.

    Args:
        x: input tensor of shape (samples, timesteps, input_dim); only its
            batch dimension is used.
    """
    # Collapse x to (samples, 1) zeros so the (possibly dynamic) batch
    # dimension carries through, then widen to input_dim.
    # (Removed leftover debug print.)
    zeros = K.expand_dims(K.sum(K.zeros_like(x), axis=(1, 2)))
    zeros = K.tile(zeros, [1, self.input_dim])
    # Every state slot shares the same zero tensor, as in the original.
    return [zeros for _ in range(len(self.states))]
Example 36
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License 5 votes vote down vote up
def get_initial_states(self, x):
    """Zero initial states: one (samples, dim) tensor per self.states_dim."""
    # (samples, timesteps, input_dim) -> (samples,) -> (samples, 1), all zeros.
    base = K.expand_dims(K.sum(K.zeros_like(x), axis=(1, 2)))
    return [K.tile(base, [1, dim]) for dim in self.states_dim]
Example 37
Project: Keras_YOLOv3_Mobilenet   Author: xuqing88   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 38
Project: Keras_YOLOv3_Mobilenet   Author: xuqing88   File: model_Mobilenet.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 39
Project: yoctol-keras-layer-zoo   Author: Yoctol   File: rnn_decoder.py    GNU General Public License v3.0 5 votes vote down vote up
def compute_mask(self, inputs, mask):
    """Delegate mask computation to the wrapped layer.

    When self.time_steps is set, the delegated mask is replaced by an
    all-True mask tiled to (batch, self.time_steps).
    """
    inner_mask = self.layer.compute_mask(inputs=inputs, mask=mask)

    if self.time_steps is None:
        return inner_mask

    # Force every position unmasked, collapse to one step, then stretch
    # along the time axis.
    full = K.any(K.ones_like(inner_mask), axis=1, keepdims=True)
    return K.tile(full, [1, self.time_steps])
Example 40
Project: yoctol-keras-layer-zoo   Author: Yoctol   File: rnn_cell.py    GNU General Public License v3.0 5 votes vote down vote up
def get_initial_state(self, inputs):
    """Prepend a zero dense state to the recurrent layer's initial states."""
    # Zeros reduced to (samples, 1), then widened to the dense layer's units.
    zeros = K.expand_dims(K.sum(K.zeros_like(inputs), axis=(1, 2)))
    dense_state = K.tile(zeros, [1, self.dense_layer.units])
    recurrent_states = self.recurrent_layer.get_initial_state(inputs)
    return [dense_state] + recurrent_states
Example 41
Project: YOLO-3D-Box   Author: scutan90   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.

    Returns:
        (box_xy, box_wh, box_confidence, box_class_probs); box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Per-cell raw activations: xy offsets, wh scales, objectness, class scores.
    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 42
Project: deep_sort_yolov3   Author: Qidian213   File: model.py    GNU General Public License v3.0 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.

    Returns:
        (box_xy, box_wh, box_confidence, box_class_probs); box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Per-cell raw activations: xy offsets, wh scales, objectness, class scores.
    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 43
Project: keras-yolo3-master   Author: lijialinneu   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 44
Project: deep_sort_tiny_yolo3   Author: scofield1991   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 45
Project: ImageAI   Author: OlafenwaMoses   File: utils.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)

    # Reshape anchors to broadcast against (batch, h, w, num_anchors, 2).
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 46
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modeltree.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    # NOTE(review): this snippet appears truncated by the source page -- it
    # stops after building grid_y and never returns. The complete versions
    # of yolo_head elsewhere in this file show the full implementation.
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1]) 
Example 47
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmerge.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    3D-box variant: each per-anchor feature vector carries, besides the
    usual xy/wh/confidence/class values, 3 raw dimension values and
    orientation confidences plus cos/sin pairs. BIN is a module-level
    constant not visible in this snippet; the 10:14 slice suggests BIN == 2
    -- TODO confirm.

    Returns:
        (grid, feats, box_xy, box_wh, box_dim, box_3d_conf, box_3d_cossin)
        when calc_loss is True, otherwise (box_xy, box_wh, box_confidence,
        box_dim, box_3d_conf, box_3d_cossin, box_class_probs).
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5+3+3*BIN])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    # 3d dim noline 
    #box_dim = K.exp(feats[..., 5:8]) ##no exp 
    #box_dim = K.sigmoid(feats[..., 5:8]) ##no exp 
    box_dim = feats[..., 5:8] ##no exp 
    box_3d_conf = K.sigmoid(feats[..., 8:10])
    #box_3d_cossin = K.l2_normalize(K.reshape(feats[..., 10:14], [-1,-1,-1, BIN, 2]))
    box_3d_cossin = K.l2_normalize(feats[..., 10:14])
    #K.sigmoid(feats[..., 8:14])
    box_class_probs = K.sigmoid(feats[..., 14:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh, box_dim, box_3d_conf, box_3d_cossin
    return box_xy, box_wh, box_confidence, box_dim, box_3d_conf, box_3d_cossin, box_class_probs 
Example 48
Project: yolov3-3dcarbox   Author: zoujialong9012   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 49
Project: NTM-Keras   Author: SigmaQuan   File: ntm.py    MIT License 5 votes vote down vote up
def get_constants(self, x):
        """Build per-call dropout masks for the recurrent (U) and input (W) weights.

        Args:
            x: input tensor of shape (samples, timesteps, input_dim); only
                its batch dimension is used, via x[:, 0, 0].

        Returns:
            A list [B_U, B_W]; each entry is a list of four tensors, either
            a dropout mask during the training phase or the constant 1.0
            when the corresponding dropout rate is not in (0, 1).
        """
        print("begin get_constants(self, x)")
        constants = []
        if 0 < self.dropout_U < 1:
            # (samples, 1) ones derived from x so the batch size stays dynamic.
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.controller_output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # Disabled read-head dropout (dropout_R) branch, kept for reference:
        # if 0 < self.dropout_R < 1:
        #     input_shape = self.input_spec[0].shape
        #     input_dim = input_shape[-1]
        #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        #     ones = K.tile(ones, (1, int(input_dim)))
        #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
        #     constants.append(B_R)
        # else:
        #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        print("end get_constants(self, x)")
        return constants 
Example 50
Project: C2   Author: Master-cai   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 51
Project: FSA-Net   Author: shamangary   File: layers.py    Apache License 2.0 5 votes vote down vote up
def _make_regular_grids(self, batch_size, height, width):
    """Build a batch of regular sampling grids in homogeneous coordinates.

    Returns a tensor of shape (batch_size, 3, height * width) whose rows
    are x, y and a constant 1.
    """
    # One regular grid spanning [-1, 1] in both axes.
    xs, ys = K_meshgrid(K_linspace(-1., 1., width),
                        K_linspace(-1., 1., height))
    xs = K.flatten(xs)
    ys = K.flatten(ys)
    homogeneous = K.concatenate([xs, ys, K.ones_like(xs)], 0)

    # Replicate the flattened grid once per batch element.
    repeated = K.tile(K.flatten(homogeneous), K.stack([batch_size]))
    return K.reshape(repeated, (batch_size, 3, height * width))
Example 52
Project: FSA-Net   Author: shamangary   File: FSANET_model.py    Apache License 2.0 5 votes vote down vote up
def call(self, input):
        """Sum `input` over its last axis and tile the result.

        Returns a tensor whose last dimension repeats the per-row sum
        self.tile_count times.
        """
        # Renamed local: the original bound the result to `sum`, shadowing
        # the builtin of the same name.
        row_sum = K.sum(input, axis=-1, keepdims=True)
        tiled = K.tile(row_sum, (1, 1, self.tile_count))
        return tiled 
Example 53
Project: Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow   Author: Akhtar303   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.

    Returns:
        (box_xy, box_wh, box_confidence, box_class_probs); box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Per-cell raw activations: xy offsets, wh scales, objectness, class scores.
    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 54
Project: yoloface   Author: sthanhng   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """

    num_anchors = len(anchors)

    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    # height, width
    grid_shape = K.shape(feats)[1:3]
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1],
                                                         K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1],
                                                              K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 55
Project: Object-and-facial-detection-in-python   Author: grebtsew   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw detection-head output; reshaped below to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        anchors: sequence of num_anchors (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: network input size; indexed with [::-1] below, so it
            is assumed to be (height, width) -- TODO confirm with caller.
        calc_loss: if True, also return the raw grid/feats tensors needed
            by the loss computation.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs). box_xy is
        normalized by the grid size and box_wh by input_shape.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of cell indices; last axis is (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size:
    # xy = (sigmoid offset + cell index) / grid size;
    # wh = anchor scaled by exp(raw) / input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 56
Project: pointnet2-keras   Author: HarborZeng   File: model_cls.py    MIT License 5 votes vote down vote up
def sample_and_group_all(xyz, points, use_xyz=True):
    """Group all input points around a single origin centroid.

    Inputs:
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor; if None, xyz alone
            is used as the point features
        use_xyz: bool; if True, concat XYZ with the local point features
    Outputs:
        new_xyz: (batch_size, 1, 3), all zeros (the origin centroid)
        new_points: (batch_size, 1, ndataset, 3+channel) TF tensor
    Note:
        Equivalent to sample_and_group with npoint=1, radius=inf and the
        origin (0,0,0) as centroid.
    """
    batch_size = xyz.get_shape()[0].value
    nsample = xyz.get_shape()[1].value

    # One zero centroid per batch element: (batch_size, 1, 3).
    new_xyz = tf.constant(
        np.tile(np.array([0, 0, 0]).reshape((1, 1, 3)), (batch_size, 1, 1)),
        dtype=tf.float32)
    # Every input index belongs to the single group.
    idx = tf.constant(
        np.tile(np.arange(nsample).reshape((1, 1, nsample)), (batch_size, 1, 1)))

    # (batch_size, npoint=1, nsample, 3)
    grouped_xyz = Lambda(lambda t: K.reshape(t, (batch_size, 1, nsample, 3)))(xyz)

    if points is None:
        new_points = grouped_xyz
    else:
        if use_xyz:
            merged = Lambda(lambda t: K.concatenate(t, axis=2))([xyz, points])
        else:
            merged = points
        # Insert the npoint=1 axis: (batch_size, 1, nsample, channels).
        new_points = Lambda(lambda t: K.expand_dims(t, 1))(merged)

    return new_xyz, new_points, idx, grouped_xyz
Example 57
Project: RPGOne   Author: RTHMaK   File: backend.py    Apache License 2.0 5 votes vote down vote up
def tile_vector(vector, matrix):
    """
    Tile each vector in a batch along a new middle axis.

    Takes vectors of shape (batch_size, vector_dim) and produces a tensor of
    shape (batch_size, tile_length, vector_dim), where tile_length is taken
    from `matrix` (shape (batch_size, tile_length, vector_dim)); the matrix's
    values are otherwise ignored.

    NOTE: if the relevant dimension of `matrix` is known at graph-compile time
    (i.e. `K.int_shape(matrix)` is not None there), prefer
    `K.repeat_elements(vector)`. This helper exists for the case where the
    number of rows is only known at runtime.

    A typical use is dotting one vector against every row of a matrix: tile
    the vector, multiply element-wise, then sum out the last mode.

    This is not a Keras Layer; call it from inside a Lambda or a custom
    Layer's call() method.
    """
    # Neither TF nor Theano can K.tile with a runtime-only size, so lean on
    # broadcasting instead: build ones of shape (tile_length, batch_size,
    # vector_dim), multiply by the (batch_size, vector_dim) vector (which
    # broadcasts over the leading axis), then permute back.
    ones = K.permute_dimensions(K.ones_like(matrix), [1, 0, 2])
    return K.permute_dimensions(ones * vector, [1, 0, 2])
Example 58
Project: ReID   Author: Wowoho   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """Decode raw YOLOv3 feature maps into bounding-box parameters.

    Args:
        feats: conv output, shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
        anchors: iterable of (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: (height, width) of the network input.

    Returns:
        Tuple (box_xy, box_wh, box_confidence, box_class_probs).
    """
    n_anchors = len(anchors)
    # Anchor sizes broadcastable over (batch, h, w, anchor, 2).
    anchor_wh = K.reshape(K.constant(anchors), [1, 1, 1, n_anchors, 2])

    grid_hw = K.shape(feats)[1:3]  # (height, width)
    # Per-cell integer offsets, concatenated x-before-y: shape (h, w, 1, 2).
    row_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[0]), [-1, 1, 1, 1]),
                     [1, grid_hw[1], 1, 1])
    col_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[1]), [1, -1, 1, 1]),
                     [grid_hw[0], 1, 1, 1])
    grid = K.cast(K.concatenate([col_idx, row_idx]), K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_hw[0], grid_hw[1], n_anchors, num_classes + 5])

    ftype = K.dtype(feats)
    # Cell-relative offsets -> fractions of the image; sizes scale the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_hw[::-1], ftype)
    box_wh = K.exp(feats[..., 2:4]) * anchor_wh / K.cast(input_shape[::-1], ftype)
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    return box_xy, box_wh, box_confidence, box_class_probs
Example 59
Project: deeplogic   Author: nuric   File: ilp.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def call(self, inputs):
    """Return the rule weights, tiled across the batch dimension."""
    head = K.constant(self.goal)  # (1, 1, pred_len, embed_size)
    body = K.softmax(self.body)
    # Join head and body along the literal axis, add a batch axis, then
    # replicate once per sample in the batch.
    rule = K.expand_dims(K.concatenate([head, body], axis=1), axis=0)
    return K.tile(rule, [K.shape(inputs)[0], 1, 1, 1, 1])
Example 60
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 5 votes vote down vote up
def _bin_op(cls, node, input_dict, op_func, inputlist=True):
        """Apply a binary op to an ONNX node's two inputs.

        Looks up both operands in ``input_dict``; if the second operand is a
        constant numpy array, it is promoted to an extra Keras Input (tiled
        along a new leading axis). Honors the legacy ONNX ``axis`` attribute
        by reshaping ``y`` with trailing singleton dims so broadcasting lines
        up.

        :param node: ONNX node whose ``inputs`` name the two operands.
        :param input_dict: mapping from tensor name to tensor/array.
        :param op_func: the Keras op to apply.
        :param inputlist: if True, call ``op_func([x, y])``; else ``op_func(x, y)``.
        :return: the result of ``op_func``.
        """
        x = input_dict[node.inputs[0]]
        y = input_dict[node.inputs[1]]
        # print(node.op_type, x.shape, y.shape)
        if isinstance(y, np.ndarray):
            # Constant operand: replicate it per sample and feed it as an
            # auxiliary model input.
            # NOTE(review): the leading 32 looks like a hard-coded batch size —
            # confirm it matches the model's actual batch dimension.
            # print('is numpy')
            # tmp = np.reshape(y, [1]+list(y.shape))
            tmp = np.tile(y, [32] + [1] * len(y.shape))
            cls.extra_input_array.append(tmp)
            y = keras.layers.Input(shape=tmp.shape[1:])
            cls.extra_input.append(y)

        broadcast = node.attrs.get("broadcast", 1)
        if broadcast == 0:
            warnings.warn("Definition of {} with broadcast disabled is not "
                          "yet supported.".format(node.op_type), UserWarning)
        if "axis" in node.attrs.keys():
            # Legacy (opset<7) broadcasting: align y at `axis` of x by
            # appending singleton dimensions, then reshape.
            num_ones_to_append = len(x.get_shape()) - \
                                 len(y.get_shape()) - \
                                 node.attrs["axis"] + 1
            if num_ones_to_append > 0:
                ones = np.ones([num_ones_to_append], 'int32')
                broadcasted_shape = np.concatenate([K.int_shape(y)[1:], ones], axis=0)
                y = keras.layers.Reshape(broadcasted_shape)(y)
        if inputlist:
            return op_func([x, y])
        else:
            return op_func(x, y)
Example 61
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 5 votes vote down vote up
def handle_tile(cls, node, input_dict):
        """Map an ONNX Tile node onto K.tile wrapped in a Lambda layer."""
        data = input_dict[node.inputs[0]]
        reps = input_dict[node.inputs[1]]
        tiled = Lambda(lambda t: K.tile(t, reps))(data)
        return [tiled]
Example 62
Project: EQanalytics   Author: AntonMu   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Decode raw YOLOv3 feature maps into bounding-box parameters.

    Args:
        feats: conv output, shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
        anchors: iterable of (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: (height, width) of the network input.
        calc_loss: when True, return the tensors the loss function needs.

    Returns:
        (box_xy, box_wh, box_confidence, box_class_probs), or
        (grid, feats, box_xy, box_wh) when calc_loss is True.
    """
    n_anchors = len(anchors)
    # Anchor sizes broadcastable over (batch, h, w, anchor, 2).
    anchor_wh = K.reshape(K.constant(anchors), [1, 1, 1, n_anchors, 2])

    grid_hw = K.shape(feats)[1:3]  # (height, width)
    # Per-cell integer offsets, concatenated x-before-y: shape (h, w, 1, 2).
    row_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[0]), [-1, 1, 1, 1]),
                     [1, grid_hw[1], 1, 1])
    col_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[1]), [1, -1, 1, 1]),
                     [grid_hw[0], 1, 1, 1])
    grid = K.cast(K.concatenate([col_idx, row_idx]), K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_hw[0], grid_hw[1], n_anchors, num_classes + 5])

    ftype = K.dtype(feats)
    # Cell-relative offsets -> fractions of the image; sizes scale the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_hw[::-1], ftype)
    box_wh = K.exp(feats[..., 2:4]) * anchor_wh / K.cast(input_shape[::-1], ftype)
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example 63
Project: dynamic_memory_networks_with_keras   Author: vchudinov   File: attention_cells.py    GNU General Public License v3.0 5 votes vote down vote up
def _generate_recurrent_dropout_mask(self, inputs, training=None):
        """Build the recurrent-dropout masks for the three internal gates.

        Creates three (batch, units) masks that are dropped out during
        training and all-ones at inference, or sets the mask to None when
        recurrent dropout is disabled.

        Args:
            inputs: input tensor, shape (batch, timesteps, input_dim); only
                its batch dimension is used to size the masks.
            training: optional training-phase flag forwarded to
                K.in_train_phase.
        """
        if 0 < self.recurrent_dropout < 1:
            # ones: shape (batch, units), derived from the batch dimension.
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                # BUG FIX: previously used self.dropout (the *input* dropout
                # rate). The recurrent mask must use self.recurrent_dropout,
                # matching the guard condition above and Keras' own recurrent
                # layers.
                return K.dropout(ones, self.recurrent_dropout)

            self._recurrent_dropout_mask = [K.in_train_phase(
                dropped_inputs,
                ones,
                training=training)
                for _ in range(3)]
        else:
            self._recurrent_dropout_mask = None
Example 64
Project: basasuya-yolo3   Author: Basasuya   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape):
    """Decode raw YOLOv3 feature maps into bounding-box parameters.

    Args:
        feats: conv output, shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
        anchors: iterable of (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: (height, width) of the network input.

    Returns:
        Tuple (box_xy, box_wh, box_confidence, box_class_probs).
    """
    n_anchors = len(anchors)
    # Anchor sizes broadcastable over (batch, h, w, anchor, 2).
    anchor_wh = K.reshape(K.constant(anchors), [1, 1, 1, n_anchors, 2])

    grid_hw = K.shape(feats)[1:3]  # (height, width)
    # Per-cell integer offsets, concatenated x-before-y: shape (h, w, 1, 2).
    row_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[0]), [-1, 1, 1, 1]),
                     [1, grid_hw[1], 1, 1])
    col_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[1]), [1, -1, 1, 1]),
                     [grid_hw[0], 1, 1, 1])
    grid = K.cast(K.concatenate([col_idx, row_idx]), K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_hw[0], grid_hw[1], n_anchors, num_classes + 5])

    ftype = K.dtype(feats)
    # Cell-relative offsets -> fractions of the image; sizes scale the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_hw[::-1], ftype)
    box_wh = K.exp(feats[..., 2:4]) * anchor_wh / K.cast(input_shape[::-1], ftype)
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    return box_xy, box_wh, box_confidence, box_class_probs
Example 65
Project: keras-yolov3-KF-objectTracking   Author: mattzheng   File: model.py    MIT License 5 votes vote down vote up
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Decode raw YOLOv3 feature maps into bounding-box parameters.

    Args:
        feats: conv output, shape (batch, grid_h, grid_w, num_anchors*(num_classes+5)).
        anchors: iterable of (width, height) anchor pairs.
        num_classes: number of object classes.
        input_shape: (height, width) of the network input.
        calc_loss: when True, return the tensors the loss function needs.

    Returns:
        (box_xy, box_wh, box_confidence, box_class_probs), or
        (grid, feats, box_xy, box_wh) when calc_loss is True.
    """
    n_anchors = len(anchors)
    # Anchor sizes broadcastable over (batch, h, w, anchor, 2).
    anchor_wh = K.reshape(K.constant(anchors), [1, 1, 1, n_anchors, 2])

    grid_hw = K.shape(feats)[1:3]  # (height, width)
    # Per-cell integer offsets, concatenated x-before-y: shape (h, w, 1, 2).
    row_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[0]), [-1, 1, 1, 1]),
                     [1, grid_hw[1], 1, 1])
    col_idx = K.tile(K.reshape(K.arange(0, stop=grid_hw[1]), [1, -1, 1, 1]),
                     [grid_hw[0], 1, 1, 1])
    grid = K.cast(K.concatenate([col_idx, row_idx]), K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_hw[0], grid_hw[1], n_anchors, num_classes + 5])

    ftype = K.dtype(feats)
    # Cell-relative offsets -> fractions of the image; sizes scale the anchors.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_hw[::-1], ftype)
    box_wh = K.exp(feats[..., 2:4]) * anchor_wh / K.cast(input_shape[::-1], ftype)
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example 66
Project: keras-training   Author: hls-fpga-machine-learning   File: quantized_layers.py    GNU General Public License v3.0 5 votes vote down vote up
def get_constants(self, inputs, training=None):
        """Assemble the input and recurrent dropout masks for the step fn.

        Returns a two-element list: one mask (or the scalar 1.0) for the
        inputs, one for the recurrent state. Masks are dropped out only in
        the training phase.
        """
        def _make_mask(width, rate):
            # (batch, width) mask derived from the batch dimension of inputs.
            base = K.tile(K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1))),
                          (1, width))

            def dropped():
                return K.dropout(base, rate)

            return K.in_train_phase(dropped, base, training=training)

        constants = []

        if 0 < self.dropout < 1:
            input_dim = K.int_shape(inputs)[-1]
            constants.append(_make_mask(int(input_dim), self.dropout))
        else:
            constants.append(K.cast_to_floatx(1.))

        if 0 < self.recurrent_dropout < 1:
            constants.append(_make_mask(self.units, self.recurrent_dropout))
        else:
            constants.append(K.cast_to_floatx(1.))

        return constants
Example 67
Project: BlurbGenreCollection-HMC   Author: uhh-lt   File: capsulelayers.py    Apache License 2.0 4 votes vote down vote up
def call(self, inputs, training=None):
        """Run the capsule transformation followed by dynamic routing.

        Projects each input capsule through W into every output capsule's
        space, then iteratively refines coupling coefficients for
        ``self.routings`` rounds and returns the squashed output capsules,
        shape [None, num_capsule, dim_capsule].
        """
        # inputs.shape=[None, input_num_capsule, input_dim_capsule]
        # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
        inputs_expand = K.expand_dims(inputs, 1)

        # Replicate num_capsule dimension to prepare being multiplied by W
        # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
        # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
        # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
        # Regard the first two dimensions as `batch` dimension,
        # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
        # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)

        # Begin: Routing algorithm ---------------------------------------------------------------------#
        # The prior for coupling coefficient, initialized as zeros.
        # b.shape = [None, self.num_capsule, self.input_num_capsule].
        b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

        assert self.routings > 0, 'The routings should be > 0.'
        for i in range(self.routings):
            # c.shape=[batch_size, num_capsule, input_num_capsule]
            # NOTE(review): `dim` is the TF1-era kwarg (later renamed `axis`);
            # dim=1 normalizes over the num_capsule axis here.
            c = tf.nn.softmax(b, dim=1)

            # c.shape =  [batch_size, num_capsule, input_num_capsule]
            # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
            # The first two dimensions as `batch` dimension,
            # then matmal: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
            # outputs.shape=[None, num_capsule, dim_capsule]
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))  # [None, 10, 16]

            if i < self.routings - 1:
                # outputs.shape =  [None, num_capsule, dim_capsule]
                # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
                # The first two dimensions as `batch` dimension,
                # then matmal: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
                # b.shape=[batch_size, num_capsule, input_num_capsule]
                b += K.batch_dot(outputs, inputs_hat, [2, 3])
        # End: Routing algorithm -----------------------------------------------------------------------#

        return outputs
Example 68
Project: CapsNet-Fashion-MNIST   Author: subarnop   File: capsulelayers.py    GNU General Public License v3.0 4 votes vote down vote up
def call(self, inputs, training=None):
        """Transform input capsules through W, then route with shared bias.

        Uses the tf.scan-based `inputs * W` computation (V2) and the static
        routing loop (V2); the V1 variants are kept below as disabled
        reference code inside bare string literals. Returns the output
        capsules reshaped to [-1, num_capsule, dim_vector].
        """
        # inputs.shape=[None, input_num_capsule, input_dim_vector]
        # Expand dims to [None, input_num_capsule, 1, 1, input_dim_vector]
        inputs_expand = K.expand_dims(K.expand_dims(inputs, 2), 2)

        # Replicate num_capsule dimension to prepare being multiplied by W
        # Now it has shape = [None, input_num_capsule, num_capsule, 1, input_dim_vector]
        inputs_tiled = K.tile(inputs_expand, [1, 1, self.num_capsule, 1, 1])

        """
        # Begin: inputs_hat computation V1 ---------------------------------------------------------------------#
        # Compute `inputs * W` by expanding the first dim of W. More time-consuming and need batch_size.
        # w_tiled.shape = [batch_size, input_num_capsule, num_capsule, input_dim_vector, dim_vector]
        w_tiled = K.tile(K.expand_dims(self.W, 0), [self.batch_size, 1, 1, 1, 1])

        # Transformed vectors, inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = K.batch_dot(inputs_tiled, w_tiled, [4, 3])
        # End: inputs_hat computation V1 ---------------------------------------------------------------------#
        """

        # Begin: inputs_hat computation V2 ---------------------------------------------------------------------#
        # Compute `inputs * W` by scanning inputs_tiled on dimension 0. This is faster but requires Tensorflow.
        # inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = tf.scan(lambda ac, x: K.batch_dot(x, self.W, [3, 2]),
                             elems=inputs_tiled,
                             initializer=K.zeros([self.input_num_capsule, self.num_capsule, 1, self.dim_vector]))
        # End: inputs_hat computation V2 ---------------------------------------------------------------------#
        """
        # Begin: routing algorithm V1, dynamic ------------------------------------------------------------#
        def body(i, b, outputs):
            c = tf.nn.softmax(b, dim=2)  # dim=2 is the num_capsule dimension
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))
            if i != 1:
                b = b + K.sum(inputs_hat * outputs, -1, keepdims=True)
            return [i-1, b, outputs]
        cond = lambda i, b, inputs_hat: i > 0
        loop_vars = [K.constant(self.num_routing), self.bias, K.sum(inputs_hat, 1, keepdims=True)]
        shape_invariants = [tf.TensorShape([]),
                            tf.TensorShape([None, self.input_num_capsule, self.num_capsule, 1, 1]),
                            tf.TensorShape([None, 1, self.num_capsule, 1, self.dim_vector])]
        _, _, outputs = tf.while_loop(cond, body, loop_vars, shape_invariants)
        # End: routing algorithm V1, dynamic ------------------------------------------------------------#
        """

        # Begin: routing algorithm V2, static -----------------------------------------------------------#
        # Routing algorithm V2. Use iteration. V2 and V1 both work without much difference on performance
        assert self.num_routing > 0, 'The num_routing should be > 0.'
        for i in range(self.num_routing):
            c = tf.nn.softmax(self.bias, dim=2)  # dim=2 is the num_capsule dimension
            # outputs.shape=[None, 1, num_capsule, 1, dim_vector]
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))

            # last iteration needs not compute bias which will not be passed to the graph any more anyway.
            if i != self.num_routing - 1:
                # NOTE(review): updating self.bias with `+=` in call() mutates
                # layer state during the forward pass — intentional here, but
                # verify it behaves as expected across repeated model calls.
                # self.bias = K.update_add(self.bias, K.sum(inputs_hat * outputs, [0, -1], keepdims=True))
                self.bias += K.sum(inputs_hat * outputs, -1, keepdims=True)
            # tf.summary.histogram('BigBee', self.bias)  # for debugging
        # End: routing algorithm V2, static ------------------------------------------------------------#

        return K.reshape(outputs, [-1, self.num_capsule, self.dim_vector])
Example 69
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 4 votes vote down vote up
def __graph_attention(self, g, x, w, a, n_heads=1):
        """Compute multi-head graph-attention coefficients.

        Args:
            g: graph adjacency matrix tensor, shape (B(batch), N(nodes), N).
            x: convolved node features, shape (B, N, F_in).
            w: projection weights mapping inputs to attentionable hidden
               states, shape (F_in, F(outputs) * H(heads)).
            a: merge weight vector turning a hidden pair into an attention
               value, shape (2 * F,).

        Returns:
            Attention tensor of shape (B, N, N, H), each row normalized by
            the sum over that node's neighbors.
        """
        n_feat = int(w.shape[1]) // n_heads  # F: output features per head
        n_nodes = g.shape[-1]                # N

        # Project, then split the fused head axis: (B, N, 1, H*F) -> (B, N, H, F).
        hidden = K.expand_dims(K.dot(x, w), axis=2)
        hidden = K.concatenate(
            [hidden[:, :, :, n_feat * h:n_feat * (h + 1)] for h in range(n_heads)],
            axis=2)

        # Mesh-concatenate every node pair: (B, N, N, H, 2F).
        tiled_rows = K.tile(K.expand_dims(hidden, axis=1), (1, n_nodes, 1, 1, 1))
        tiled_cols = K.tile(K.expand_dims(hidden, axis=2), (1, 1, n_nodes, 1, 1))
        pairs = K.concatenate([tiled_rows, tiled_cols], axis=4)

        # Lift `a` to rank 5 — (1, 1, 1, 1, 2F) — so the product broadcasts.
        merge_vec = a
        for _ in range(4):
            merge_vec = K.expand_dims(merge_vec, 0)

        # LeakyReLU(alpha=0.2) then exp: unnormalized attention, (B, N, N, H).
        scores = K.exp(K.relu(K.sum(pairs * merge_vec, axis=-1), alpha=0.2))

        # Normalize over each node's adjacent neighbors.
        denom = K.sum(scores * K.expand_dims(g, axis=-1),
                      axis=2, keepdims=True)  # (B, N, 1, H)
        return scores / denom
Example 70
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 4 votes vote down vote up
def _graph_attention(self, g, x, w, a, n_heads=1):
        """Compute multi-head graph-attention coefficients.

        Args:
            g: graph adjacency matrix tensor, shape (B(batch), N(nodes), N).
            x: convolved node features, shape (B, N, F_in).
            w: projection weights mapping inputs to attentionable hidden
               states, shape (F_in, F(outputs) * H(heads)).
            a: merge weight vector turning a hidden pair into an attention
               value, shape (2 * F,).

        Returns:
            Attention tensor of shape (B, N, N, H), each row normalized by
            the sum over that node's neighbors.
        """
        n_feat = int(w.shape[1]) // n_heads  # F: output features per head
        n_nodes = g.shape[-1]                # N

        # Project, then split the fused head axis: (B, N, 1, H*F) -> (B, N, H, F).
        hidden = K.expand_dims(K.dot(x, w), axis=2)
        hidden = K.concatenate(
            [hidden[:, :, :, n_feat * h:n_feat * (h + 1)] for h in range(n_heads)],
            axis=2)

        # Mesh-concatenate every node pair: (B, N, N, H, 2F).
        tiled_rows = K.tile(K.expand_dims(hidden, axis=1), (1, n_nodes, 1, 1, 1))
        tiled_cols = K.tile(K.expand_dims(hidden, axis=2), (1, 1, n_nodes, 1, 1))
        pairs = K.concatenate([tiled_rows, tiled_cols], axis=4)

        # Lift `a` to rank 5 — (1, 1, 1, 1, 2F) — so the product broadcasts.
        merge_vec = a
        for _ in range(4):
            merge_vec = K.expand_dims(merge_vec, 0)

        # LeakyReLU(alpha=0.2) then exp: unnormalized attention, (B, N, N, H).
        scores = K.exp(K.relu(K.sum(pairs * merge_vec, axis=-1), alpha=0.2))

        # Normalize over each node's adjacent neighbors.
        denom = K.sum(scores * K.expand_dims(g, axis=-1),
                      axis=2, keepdims=True)  # (B, N, 1, H)
        return scores / denom
Example 71
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 4 votes vote down vote up
def __graph_attention(self, g, x, w, a, n_heads=1):
        """Compute multi-head graph-attention coefficients.

        Args:
            g: graph adjacency matrix tensor, shape (B(batch), N(nodes), N).
            x: convolved node features, shape (B, N, F_in).
            w: projection weights mapping inputs to attentionable hidden
               states, shape (F_in, F(outputs) * H(heads)).
            a: merge weight vector turning a hidden pair into an attention
               value, shape (2 * F,).

        Returns:
            Attention tensor of shape (B, N, N, H), each row normalized by
            the sum over that node's neighbors.
        """
        n_feat = int(w.shape[1]) // n_heads  # F: output features per head
        n_nodes = g.shape[-1]                # N

        # Project, then split the fused head axis: (B, N, 1, H*F) -> (B, N, H, F).
        hidden = K.expand_dims(K.dot(x, w), axis=2)
        hidden = K.concatenate(
            [hidden[:, :, :, n_feat * h:n_feat * (h + 1)] for h in range(n_heads)],
            axis=2)

        # Mesh-concatenate every node pair: (B, N, N, H, 2F).
        tiled_rows = K.tile(K.expand_dims(hidden, axis=1), (1, n_nodes, 1, 1, 1))
        tiled_cols = K.tile(K.expand_dims(hidden, axis=2), (1, 1, n_nodes, 1, 1))
        pairs = K.concatenate([tiled_rows, tiled_cols], axis=4)

        # Lift `a` to rank 5 — (1, 1, 1, 1, 2F) — so the product broadcasts.
        merge_vec = a
        for _ in range(4):
            merge_vec = K.expand_dims(merge_vec, 0)

        # LeakyReLU(alpha=0.2) then exp: unnormalized attention, (B, N, N, H).
        scores = K.exp(K.relu(K.sum(pairs * merge_vec, axis=-1), alpha=0.2))

        # Normalize over each node's adjacent neighbors.
        denom = K.sum(scores * K.expand_dims(g, axis=-1),
                      axis=2, keepdims=True)  # (B, N, 1, H)
        return scores / denom
Example 72
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 4 votes vote down vote up
def _graph_attention(self, g, x, w, a, n_heads=1):
        """Compute multi-head graph-attention coefficients.

        Args:
            g: graph adjacency matrix tensor, shape (B(batch), N(nodes), N).
            x: convolved node features, shape (B, N, F_in).
            w: projection weights mapping inputs to attentionable hidden
               states, shape (F_in, F(outputs) * H(heads)).
            a: merge weight vector turning a hidden pair into an attention
               value, shape (2 * F,).

        Returns:
            Attention tensor of shape (B, N, N, H), each row normalized by
            the sum over that node's neighbors.
        """
        n_feat = int(w.shape[1]) // n_heads  # F: output features per head
        n_nodes = g.shape[-1]                # N

        # Project, then split the fused head axis: (B, N, 1, H*F) -> (B, N, H, F).
        hidden = K.expand_dims(K.dot(x, w), axis=2)
        hidden = K.concatenate(
            [hidden[:, :, :, n_feat * h:n_feat * (h + 1)] for h in range(n_heads)],
            axis=2)

        # Mesh-concatenate every node pair: (B, N, N, H, 2F).
        tiled_rows = K.tile(K.expand_dims(hidden, axis=1), (1, n_nodes, 1, 1, 1))
        tiled_cols = K.tile(K.expand_dims(hidden, axis=2), (1, 1, n_nodes, 1, 1))
        pairs = K.concatenate([tiled_rows, tiled_cols], axis=4)

        # Lift `a` to rank 5 — (1, 1, 1, 1, 2F) — so the product broadcasts.
        merge_vec = a
        for _ in range(4):
            merge_vec = K.expand_dims(merge_vec, 0)

        # LeakyReLU(alpha=0.2) then exp: unnormalized attention, (B, N, N, H).
        scores = K.exp(K.relu(K.sum(pairs * merge_vec, axis=-1), alpha=0.2))

        # Normalize over each node's adjacent neighbors.
        denom = K.sum(scores * K.expand_dims(g, axis=-1),
                      axis=2, keepdims=True)  # (B, N, 1, H)
        return scores / denom
Example 73
Project: CDAE4InfoExtraction   Author: grassknoted   File: capsulelayers.py    MIT License 4 votes vote down vote up
def call(self, inputs, training=None):
        """Run the capsule transformation followed by dynamic routing.

        Projects each input capsule through W into every output capsule's
        space, then iteratively refines coupling coefficients for
        ``self.routings`` rounds and returns the squashed output capsules,
        shape [None, num_capsule, dim_capsule].
        """
        # inputs.shape=[None, input_num_capsule, input_dim_capsule]
        # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
        inputs_expand = K.expand_dims(inputs, 1)

        # Replicate num_capsule dimension to prepare being multiplied by W
        # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
        # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
        # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
        # Regard the first two dimensions as `batch` dimension,
        # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
        # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)

        # Begin: Routing algorithm ---------------------------------------------------------------------#
        # The prior for coupling coefficient, initialized as zeros.
        # b.shape = [None, self.num_capsule, self.input_num_capsule].
        b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

        assert self.routings > 0, 'The routings should be > 0.'
        for i in range(self.routings):
            # c.shape=[batch_size, num_capsule, input_num_capsule]
            # NOTE(review): `dim` is the TF1-era kwarg (later renamed `axis`);
            # dim=1 normalizes over the num_capsule axis here.
            c = tf.nn.softmax(b, dim=1)

            # c.shape =  [batch_size, num_capsule, input_num_capsule]
            # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
            # The first two dimensions as `batch` dimension,
            # then matmal: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
            # outputs.shape=[None, num_capsule, dim_capsule]
            outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))  # [None, 10, 16]

            if i < self.routings - 1:
                # outputs.shape =  [None, num_capsule, dim_capsule]
                # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
                # The first two dimensions as `batch` dimension,
                # then matmal: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
                # b.shape=[batch_size, num_capsule, input_num_capsule]
                b += K.batch_dot(outputs, inputs_hat, [2, 3])
        # End: Routing algorithm -----------------------------------------------------------------------#

        return outputs
Example 74
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License 4 votes vote down vote up
def yolo_head(d2feats, d3dimfeats, d3orientfeats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Extends the standard YOLOv3 head with 3D outputs: box dimensions
    (d3dimfeats, 3 values per anchor) and orientation bins (d3orientfeats,
    3*BIN values per anchor: 2 confidences followed by the cos/sin terms).
    Returns loss-oriented tensors when calc_loss is True, otherwise the
    decoded prediction tensors.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(d2feats)[1:3] # height, width
    # Per-cell integer offsets; concatenated x-before-y below.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(d2feats))

    d2feats = K.reshape(
        d2feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    d3dimfeats = K.reshape(d3dimfeats, [-1, grid_shape[0], grid_shape[1], num_anchors, 3])
    d3orientfeats = K.reshape(d3orientfeats, [-1, grid_shape[0], grid_shape[1], num_anchors, 3*BIN])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(d2feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(d2feats))
    box_wh = K.exp(d2feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(d2feats))
    box_confidence = K.sigmoid(d2feats[..., 4:5])
    box_class_probs = K.sigmoid(d2feats[..., 5:])
    # 3D dimensions are used raw (no exp/sigmoid); earlier attempts kept below.
    #box_dim = K.exp(feats[..., 5:8]) ##no exp 
    #box_dim = K.sigmoid(feats[..., 5:8]) ##no exp 
    box_dim = d3dimfeats[..., 0:3] ##no exp 
    # First two orientation channels are bin confidences.
    box_3d_conf = K.sigmoid(d3orientfeats[..., 0:2])
    #print(box_3d_conf.shape)
    # cos/sin terms passed through raw; normalization attempts kept below.
    #box_3d_cossin = K.l2_normalize(K.reshape(d3orientfeats[..., 2:3*BIN], [-1, -1, -1, -1, BIN, 2]), 2)
    box_3d_cossin = d3orientfeats[..., 2:3*BIN]

    #print(box_3d_cossin.shape)
    #K.sigmoid(K.reshape(d3orientfeats[..., 2:3*BIN], [-1, BIN, 2]))
    #box_3d_cossin = K.l2_normalize(d3orientfeats[..., 2:3*BIN])
    #box_3d_cossin = K.tanh(d3orientfeats[..., 2:3*BIN])
    #K.sigmoid(feats[..., 8:14])

    if calc_loss == True:
        return grid, d2feats, box_xy, box_wh, box_dim, box_3d_conf, box_3d_cossin
    return box_xy, box_wh, box_confidence, box_dim, box_3d_conf, box_3d_cossin, box_class_probs
Example 75
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License 4 votes vote down vote up
def yolo_head(d2feats, d3dimfeats, d3orientfeats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        d2feats: 2-D detection feature map; reshaped here to
            (batch, grid_h, grid_w, num_anchors, num_classes + 5).
        d3dimfeats: 3-D dimension feature map; reshaped to
            (batch, grid_h, grid_w, num_anchors, 3).
        d3orientfeats: 3-D orientation feature map; reshaped to
            (batch, grid_h, grid_w, num_anchors, 3*BIN).
        anchors: sequence of (width, height) anchor box pairs.
        num_classes: number of object classes.
        input_shape: network input (height, width), used to normalize box_wh.
        calc_loss: if True, return the tensors the loss function needs
            instead of the inference outputs.

    Returns:
        If calc_loss: (grid, d2feats, box_xy, box_wh, box_dim, box_3d_conf,
        box_3d_cossin). Otherwise: (box_xy, box_wh, box_confidence, box_dim,
        box_3d_conf, box_3d_cossin, box_class_probs).
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(d2feats)[1:3] # height, width
    # Build a (grid_h, grid_w, 1, 2) tensor of per-cell offsets in (x, y) order.
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(d2feats))

    d2feats = K.reshape(
        d2feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    d3dimfeats = K.reshape(d3dimfeats, [-1, grid_shape[0], grid_shape[1], num_anchors, 3])
    d3orientfeats = K.reshape(d3orientfeats, [-1, grid_shape[0], grid_shape[1], num_anchors, 3*BIN])

    # Adjust predictions to each spatial grid point and anchor size.
    # xy is the sigmoid offset inside a cell, normalized by grid size (w, h);
    # wh is the exp-scaled anchor size, normalized by the input size (w, h).
    box_xy = (K.sigmoid(d2feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(d2feats))
    box_wh = K.exp(d2feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(d2feats))
    box_confidence = K.sigmoid(d2feats[..., 4:5])
    box_class_probs = K.sigmoid(d2feats[..., 5:])
    # 3-D box dimensions are regressed linearly (deliberately no exp/sigmoid).
    box_dim = d3dimfeats[..., 0:3]
    # First 2 orientation channels per anchor: bin confidences.
    box_3d_conf = K.sigmoid(d3orientfeats[..., 0:2])
    # Remaining channels: raw cos/sin pairs for the BIN orientation bins
    # (normalization, if any, is left to the loss/consumer).
    box_3d_cossin = d3orientfeats[..., 2:3*BIN]

    if calc_loss:
        return grid, d2feats, box_xy, box_wh, box_dim, box_3d_conf, box_3d_cossin
    return box_xy, box_wh, box_confidence, box_dim, box_3d_conf, box_3d_cossin, box_class_probs
Example 76
Project: FSA-Net   Author: shamangary   File: capsulelayers.py    Apache License 2.0 4 votes vote down vote up
def call(self, inputs, training=None):
        """Transform input capsules by W and route them to output capsules.

        Args:
            inputs: tensor of shape [None, input_num_capsule, input_dim_capsule].
            training: unused here; accepted for Keras layer API compatibility.

        Returns:
            Output capsules of shape [None, num_capsule, dim_capsule].
        """
        # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
        inputs_expand = K.expand_dims(inputs, 1)

        # Replicate num_capsule dimension to prepare being multiplied by W.
        # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
        # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
        # The first two dimensions act as a `batch` dimension, then
        # matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
        # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        inputs_hat = K.map_fn(lambda x: batch_dot(
            x, self.W, [2, 3]), elems=inputs_tiled)

        # Begin: Routing algorithm --------------------------------------------#
        # Prior for the coupling coefficients, initialized as zeros.
        # b.shape = [None, self.num_capsule, self.input_num_capsule]
        b = tf.zeros(shape=[K.shape(inputs_hat)[0],
                            self.num_capsule, self.input_num_capsule])
        assert self.routings > 0, 'The routings should be > 0.'
        for i in range(self.routings):
            # c.shape=[batch_size, num_capsule, input_num_capsule]
            # NOTE(review): `dim` is deprecated in newer TF in favour of
            # `axis`; kept as-is to match the TF version this code targets.
            c = tf.nn.softmax(b, dim=1)

            # Weighted sum of predictions over input capsules, then squash:
            # outputs.shape=[None, num_capsule, dim_capsule]
            outputs = squash(batch_dot(c, inputs_hat, [2, 2]))

            if i < self.routings - 1:
                # Agreement between predictions and outputs refines the prior.
                # b.shape=[batch_size, num_capsule, input_num_capsule]
                b += batch_dot(outputs, inputs_hat, [2, 3])
        # End: Routing algorithm ----------------------------------------------#
        return outputs
Example 77
Project: FSA-Net   Author: shamangary   File: FSANET_model.py    Apache License 2.0 4 votes vote down vote up
def ssr_S_model_build(self, num_primcaps, m_dim):
        """Build the model that turns the three stage feature maps into primary capsules.

        Each stage contributes an SR matrix (from the shared feature model) that is
        combined with a jointly-predicted SL matrix, normalized, and applied to the
        flattened stage features.
        """
        stage_inputs = [Input((self.map_xy_size, self.map_xy_size, 64))
                        for _ in range(3)]

        feat_S_model = self.ssr_feat_S_model_build(m_dim)

        # Run the shared feature model on every stage: one right-hand matrix
        # and one feature vector per stage.
        SR_matrices = []
        preS_feats = []
        for stage_input in stage_inputs:
            SR_matrix, feat_preS = feat_S_model(stage_input)
            SR_matrices.append(SR_matrix)
            preS_feats.append(feat_preS)

        # The left-hand matrix is predicted jointly from all three stage features.
        SL_matrix = Dense(int(num_primcaps / 3) * m_dim,
                          activation='sigmoid')(Concatenate()(preS_feats))
        SL_matrix = Reshape((int(num_primcaps / 3), m_dim))(SL_matrix)

        S_matrices = [
            MatrixMultiplyLayer(name='S_matrix_s%d' % (k + 1))([SL_matrix, SR_matrices[k]])
            for k in range(3)
        ]

        # Very important!!! Without this training won't converge.
        norm_S = [MatrixNormLayer(tile_count=64)(S_matrix) for S_matrix in S_matrices]

        # Flatten each stage's spatial grid into a sequence of 64-dim features.
        flat_feats = [Reshape((self.map_xy_size * self.map_xy_size, 64))(stage_input)
                      for stage_input in stage_inputs]
        feat_pre_concat = Concatenate(axis=1)(flat_feats)

        # Warning: don't use keras's 'K.dot'. It is very weird when high dimension is used.
        # https://github.com/keras-team/keras/issues/9779
        # Make sure 'tf.matmul' is used (inside PrimCapsLayer).
        stage_primcaps = [PrimCapsLayer()([S_matrices[k], feat_pre_concat, norm_S[k]])
                          for k in range(3)]

        primcaps = Concatenate(axis=1)(stage_primcaps)

        return Model(inputs=stage_inputs, outputs=primcaps, name='ssr_S_model')
Example 78
Project: Multi-level-DCNet   Author: ssrp   File: capsulelayers.py    GNU General Public License v3.0 4 votes vote down vote up
def call(self, inputs, training=None):
        """Predict output capsules from input capsules via dynamic routing.

        inputs: [None, input_num_capsule, input_dim_capsule]
        returns: [None, num_capsule, dim_capsule]
        """
        # Insert an output-capsule axis and replicate the inputs along it:
        # tiled.shape = [None, num_capsule, input_num_capsule, input_dim_capsule]
        tiled = K.tile(K.expand_dims(inputs, 1), [1, self.num_capsule, 1, 1])

        # Prediction vectors u_hat = inputs * W, computed sample-by-sample.
        # W.shape = [num_capsule, input_num_capsule, dim_capsule, input_dim_capsule];
        # the first two axes act as a batch, then
        # [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
        # u_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        u_hat = K.map_fn(lambda sample: K.batch_dot(sample, self.W, [2, 3]),
                         elems=tiled)

        # Begin: Routing algorithm --------------------------------------------#
        # Coupling logits, one per (output capsule, input capsule) pair,
        # start at zero: [None, num_capsule, input_num_capsule].
        routing_logits = tf.zeros(
            shape=[K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule])

        assert self.routings > 0, 'The routings should be > 0.'
        outputs = None
        for iteration in range(self.routings):
            # Coupling coefficients: softmax over the num_capsule axis.
            coupling = tf.nn.softmax(routing_logits, dim=1)

            # Weighted sum of predictions over input capsules, squashed:
            # [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule]
            # outputs.shape = [None, num_capsule, dim_capsule]
            outputs = squash(K.batch_dot(coupling, u_hat, [2, 2]))

            if iteration < self.routings - 1:
                # Agreement between predictions and the current outputs
                # sharpens the coupling for the next iteration.
                routing_logits += K.batch_dot(outputs, u_hat, [2, 3])
        # End: Routing algorithm ----------------------------------------------#

        return outputs
Example 79
Project: CapsNet-Collections   Author: Jiankai-Sun   File: capsulelayers.py    GNU General Public License v3.0 4 votes vote down vote up
def call(self, inputs, training=None):
        """Transform input capsules by W and route them via `self.bias`.

        Note this variant keeps the routing logits in a layer attribute
        (`self.bias`) and updates it in place with `+=` each call, rather
        than using a loop-local tensor.

        inputs: [None, input_num_capsule, input_dim_vector]
        returns: [None, num_capsule, dim_vector]
        """
        # inputs.shape=[None, input_num_capsule, input_dim_vector]
        # Expand dims to [None, input_num_capsule, 1, 1, input_dim_vector]
        inputs_expand = K.expand_dims(K.expand_dims(inputs, 2), 2)

        # Replicate num_capsule dimension to prepare being multiplied by W
        # Now it has shape = [None, input_num_capsule, num_capsule, 1, input_dim_vector]
        inputs_tiled = K.tile(inputs_expand, [1, 1, self.num_capsule, 1, 1])

        """  
        # Compute `inputs * W` by expanding the first dim of W. More time-consuming and need batch_size.
        # Now W has shape  = [batch_size, input_num_capsule, num_capsule, input_dim_vector, dim_vector]
        w_tiled = K.tile(K.expand_dims(self.W, 0), [self.batch_size, 1, 1, 1, 1])
        
        # Transformed vectors, inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        inputs_hat = K.batch_dot(inputs_tiled, w_tiled, [4, 3])
        """
        # Compute `inputs * W` by scanning inputs_tiled on dimension 0. This is faster but requires Tensorflow.
        # inputs_hat.shape = [None, input_num_capsule, num_capsule, 1, dim_vector]
        # The accumulator `ac` is ignored; tf.scan is used purely as a
        # per-sample map, with the initializer fixing the element shape.
        inputs_hat = tf.scan(lambda ac, x: K.batch_dot(x, self.W, [3, 2]),
                             elems=inputs_tiled,
                             initializer=K.zeros([self.input_num_capsule, self.num_capsule, 1, self.dim_vector]))
        """
        # Routing algorithm V1. Use tf.while_loop in a dynamic way.
        def body(i, b, outputs):
            c = tf.nn.softmax(self.bias, dim=2)  # dim=2 is the num_capsule dimension
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))
            b = b + K.sum(inputs_hat * outputs, -1, keepdims=True)
            return [i-1, b, outputs]

        cond = lambda i, b, inputs_hat: i > 0
        loop_vars = [K.constant(self.num_routing), self.bias, K.sum(inputs_hat, 1, keepdims=True)]
        _, _, outputs = tf.while_loop(cond, body, loop_vars)
        """
        # Routing algorithm V2. Use iteration. V2 and V1 both work without much difference on performance
        assert self.num_routing > 0, 'The num_routing should be > 0.'
        for i in range(self.num_routing):
            c = tf.nn.softmax(self.bias, dim=2)  # dim=2 is the num_capsule dimension
            # outputs.shape=[None, 1, num_capsule, 1, dim_vector]
            outputs = squash(K.sum(c * inputs_hat, 1, keepdims=True))

            # last iteration needs not compute bias which will not be passed to the graph any more anyway.
            if i != self.num_routing - 1:
                # self.bias = K.update_add(self.bias, K.sum(inputs_hat * outputs, [0, -1], keepdims=True))
                # NOTE(review): rebinding self.bias each call accumulates
                # routing state across calls — presumably intentional here,
                # but verify against how the layer is (re)built per model.
                self.bias += K.sum(inputs_hat * outputs, -1, keepdims=True)
            # tf.summary.histogram('BigBee', self.bias)  # for debugging
        # Drop the singleton axes: [None, num_capsule, dim_vector]
        return K.reshape(outputs, [-1, self.num_capsule, self.dim_vector])
Example 80
Project: RPGOne   Author: RTHMaK   File: tree_composition_lstm.py    Apache License 2.0 4 votes vote down vote up
def step(self, inputs, states):
        """One timestep of the shift-reduce composition: apply the current op
        (shift / 1-arg reduce / 2-arg reduce) to the stack and buffer states.

        Args:
            inputs: (samples, input_dim) slice for this timestep; only
                column 0 (the op code) is used here.
            states: [buffer, stack] carried between timesteps.

        Returns:
            (stack_top_h, [buff, stack]) — the h half of the new stack top,
            plus the updated states.
        """
        # This function is called at each timestep. Before calling this, Keras' rnn
        # dimshuffles the input to have time as the leading dimension, and iterates over
        # it So,
        # inputs: (samples, input_dim) = (samples, x_op + <current timestep's buffer input>)
        #
        # We do not need the current timestep's buffer input here, since the buffer
        # state is the one that's relevant. We just want the current op from inputs.

        buff = states[0] # Current state of buffer
        stack = states[1] # Current state of stack

        step_ops = inputs[:, 0] #(samples, 1), current ops for all samples.

        # We need to make tensors from the ops vectors, one to apply to all dimensions
        # of stack, and the other for the buffer.
        # Broadcasting the per-sample op code over every stack/buffer element
        # lets K.switch select per-sample between candidate next states below.
        # For the stack:
        # Note stack's dimensionality is 2*output_dim because it holds both h and c
        stack_tiled_step_ops = K.permute_dimensions(
                K.tile(step_ops, (self.stack_limit, 2 * self.output_dim, 1)),
                (2, 0, 1))  # (samples, stack_limit, 2*dim)

        # For the buffer:
        buff_tiled_step_ops = K.permute_dimensions(
                K.tile(step_ops, (self.buffer_ops_limit, 2 * self.output_dim, 1)),
                (2, 0, 1))  # (samples, buff_len, 2*dim)

        # Candidate next states, computed unconditionally for every sample:
        # shift: push the buffer head onto the stack (truncated to stack_limit).
        shifted_stack = K.concatenate([buff[:, :1], stack], axis=1)[:, :self.stack_limit]
        # reduce2: compose the top element, shift the rest up, zero-pad the tail.
        one_reduced_stack = K.concatenate([self._one_arg_compose(stack[:, :2]),
                                           stack[:, 2:],
                                           K.zeros_like(stack)[:, :1]],
                                          axis=1)
        # reduce3: compose the top two elements, shift up, zero-pad the tail.
        two_reduced_stack = K.concatenate([self._two_arg_compose(stack[:, :3]),
                                           stack[:, 3:],
                                           K.zeros_like(stack)[:, :2]],
                                          axis=1)
        # shift also consumes the buffer head; zero-pad its tail.
        shifted_buff = K.concatenate([buff[:, 1:], K.zeros_like(buff)[:, :1]], axis=1)

        # Select per-sample which candidate applies, by comparing the tiled
        # op codes against each op constant. Order matters: each K.switch
        # builds on the previous selection.
        stack = K.switch(K.equal(stack_tiled_step_ops, SHIFT_OP), shifted_stack, stack)
        stack = K.switch(K.equal(stack_tiled_step_ops, REDUCE2_OP), one_reduced_stack, stack)
        stack = K.switch(K.equal(stack_tiled_step_ops, REDUCE3_OP), two_reduced_stack, stack)
        buff = K.switch(K.equal(buff_tiled_step_ops, SHIFT_OP), shifted_buff, buff)

        stack_top_h = stack[:, 0, :self.output_dim] # first half of the top element for all samples

        return stack_top_h, [buff, stack]