Python keras.backend.int_shape() Examples

The following code examples show how to use keras.backend.int_shape(). They are drawn from open source Python projects.
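Before the examples, note what the function returns: K.int_shape() gives the static shape of a tensor or variable as a tuple of Python integers, with None for any dimension that is unknown at graph-construction time (typically the batch size). By contrast, K.shape() returns a symbolic shape tensor that is only resolved at run time. A minimal sketch of the difference:

from keras import backend as K
from keras.layers import Input

x = Input(shape=(32, 64))  # batch dimension left unspecified
print(K.int_shape(x))      # (None, 32, 64) -- a plain Python tuple
print(K.shape(x))          # a symbolic shape tensor, resolved at run time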

Example 1
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.sum(cross_entropy) / K.sum(tf.to_float(legal_labels))

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 2
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def sparse_accuracy(y_true, y_pred):
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))


# Define different models


# 3D-FCN model 
Example 3
Project: timeception   Author: noureldien   File: timeception.py    GNU General Public License v3.0
def timeception_layers(tensor, n_layers=4, n_groups=8, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    expansion_factor = 1.25
    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # how many layers of timeception
    for i in range(n_layers):
        layer_num = i + 1

        # get details about grouping
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # temporal conv per group
        tensor = __grouped_convolutions(tensor, n_groups, n_channels_per_branch, is_dilated, layer_num)

        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1), name='maxpool_tc%d' % (layer_num))(tensor)
        n_channels_in = n_channels_out

    return tensor 
Example 4
Project: timeception   Author: noureldien   File: timeception.py    GNU General Public License v3.0
def __call_timeception_layers(self, tensor, n_layers, n_groups, expansion_factor):
        input_shape = K.int_shape(tensor)
        assert len(input_shape) == 5

        _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

        # how many layers of timeception
        for i in range(n_layers):
            layer_num = i + 1

            # get details about grouping
            n_channels_per_branch, n_channels_out = self.__get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

            # temporal conv per group
            tensor = self.__call_grouped_convolutions(tensor, n_groups, n_channels_per_branch, layer_num)

            # downsample over time
            tensor = getattr(self, 'maxpool_tc%d' % (layer_num))(tensor)
            n_channels_in = n_channels_out

        return tensor 
Example 5
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    MIT License
def sampling(args: tuple):
    """
    Reparameterization trick by sampling z from unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example 6
Project: DeepIC   Author: HeavenDuke   File: ResSppNet.py    MIT License
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters = residual_shape[CHANNEL_AXIS],
                          kernel_size = (1, 1),
                          strides = (stride_width, stride_height),
                          padding = "valid",
                          kernel_initializer = "he_normal",
                          kernel_regularizer = l2(regularizer_rate))(input)

    return add([shortcut, residual]) 
Example 7
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License
def mog_layer(input_layer, previous_layer, n_mixtures, regularize_activations=False):
    if regularize_activations:
        activity_regularizer = ExpConstraint(-2, 10)
    else:
        activity_regularizer = None
    n_outputs = K.int_shape(input_layer)[1]
    mu = Reshape((n_outputs, n_mixtures))(
        Dense(n_outputs * n_mixtures, name="mog_mu")(
            previous_layer))
    sigma = Reshape((n_outputs, n_mixtures))(
        Lambda(lambda x: K.exp(x), output_shape=(n_outputs * n_mixtures,))(
            Dense(n_outputs * n_mixtures, name="mog_sigma",
                  activity_regularizer=activity_regularizer,
                  bias_initializer=keras.initializers.Ones())(
                previous_layer)))
    # Implement softmax here as it has to work on n_mixtures
    temp_alpha = Lambda(lambda x: K.exp(x - K.max(x, axis=2, keepdims=True)), output_shape=(n_outputs, n_mixtures))(
        Reshape((n_outputs, n_mixtures))(Dense(n_outputs * n_mixtures, name="mog_alpha",
                                               activity_regularizer=activity_regularizer,
                                               bias_initializer=keras.initializers.Zeros())(
            previous_layer)))
    alpha = Lambda(lambda x: x / K.expand_dims(K.sum(x, axis=2), 2), output_shape=(n_outputs, n_mixtures))(temp_alpha)
    output = Lambda(_merge_mog, output_shape=(n_outputs,))([input_layer, mu, sigma, alpha])
    return output 
Example 8
Project: spektral   Author: danielegrattarola   File: ops.py    MIT License
def filter_dot(fltr, features):
    """
    Performs the multiplication of a graph filter (N x N) with the node features,
    automatically dealing with single, mixed, and batch modes.
    :param fltr: the graph filter(s) (N x N in single and mixed mode,
    batch x N x N in batch mode).
    :param features: the node features (N x F in single mode, batch x N x F in
    mixed and batch mode).
    :return: the filtered features.
    """
    if len(K.int_shape(features)) == 2:
        # Single mode
        return K.dot(fltr, features)
    else:
        if len(K.int_shape(fltr)) == 3:
            # Batch mode
            return K.batch_dot(fltr, features)
        else:
            # Mixed mode
            return mixed_mode_dot(fltr, features) 
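A small usage sketch of the modes described in the docstring; the shapes and values below are illustrative assumptions:

import numpy as np
from keras import backend as K

N, F, batch = 4, 3, 2
fltr_single = K.constant(np.eye(N))                    # (N, N)
feats_single = K.constant(np.random.rand(N, F))        # (N, F)
out_single = filter_dot(fltr_single, feats_single)     # single mode -> (N, F)

fltr_batch = K.constant(np.random.rand(batch, N, N))   # (batch, N, N)
feats_batch = K.constant(np.random.rand(batch, N, F))  # (batch, N, F)
out_batch = filter_dot(fltr_batch, feats_batch)        # batch mode -> (batch, N, F)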
Example 9
Project: spektral   Author: danielegrattarola   File: ops.py    MIT License
def mixed_mode_dot(A, B):
    """
    Computes the equivalent of `tf.einsum('ij,bjk->bik', A, B)`, but
    works for both dense and sparse input filters.
    :param A: rank 2 Tensor or SparseTensor.
    :param B: rank 3 Tensor or SparseTensor.
    :return: rank 3 Tensor or SparseTensor.
    """
    s_0_, s_1_, s_2_ = K.int_shape(B)
    B_T = transpose(B, (1, 2, 0))
    B_T = reshape(B_T, (s_1_, -1))
    output = single_mode_dot(A, B_T)
    output = reshape(output, (s_1_, s_2_, -1))
    output = transpose(output, (2, 0, 1))

    return output


################################################################################
# Wrappers for automatic switching between dense and sparse ops
################################################################################ 
Example 10
Project: CDAE4InfoExtraction   Author: grassknoted   File: VAE.py    MIT License
def sampling(args):
    """
    Reparameterization trick by sampling from an isotropic unit Gaussian.
    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon


# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend 
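Concretely, the comment above refers to wiring sampling into the model through a Lambda layer, roughly as in the sketch below (z_mean, z_log_var, and latent_dim are illustrative names, not taken from the project):

z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])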
Example 11
Project: SeqGAN   Author: tyo-yo   File: models.py    MIT License
def Highway(x, num_layers=1, activation='relu', name_prefix=''):
    '''
    Layer wrapper function for Highway network
    # Arguments:
        x: tensor, shape = (B, input_size)
    # Optional Arguments:
        num_layers: int, default is 1, the number of Highway network layers
        activation: keras activation, default is 'relu'
        name_prefix: str, default is '', layer name prefix
    # Returns:
        out: tensor, shape = (B, input_size)
    '''
    input_size = K.int_shape(x)[1]
    for i in range(num_layers):
        gate_ratio_name = '{}Highway/Gate_ratio_{}'.format(name_prefix, i)
        fc_name = '{}Highway/FC_{}'.format(name_prefix, i)
        gate_name = '{}Highway/Gate_{}'.format(name_prefix, i)

        gate_ratio = Dense(input_size, activation='sigmoid', name=gate_ratio_name)(x)
        fc = Dense(input_size, activation=activation, name=fc_name)(x)
        x = Lambda(lambda args: args[0] * args[2] + args[1] * (1 - args[2]), name=gate_name)([fc, x, gate_ratio])
    return x 
Example 12
Project: iMIMIC-RCVs   Author: medgift   File: optimizer.py    MIT License
def _get_seed_input(self, seed_input):
        """Creates a random `seed_input` if None. Otherwise:
            - Ensures batch_size dim on provided `seed_input`.
            - Shuffle axis according to expected `image_data_format`.
        """
        desired_shape = (1, ) + K.int_shape(self.input_tensor)[1:]
        if seed_input is None:
            return utils.random_array(desired_shape, mean=np.mean(self.input_range),
                                      std=0.05 * (self.input_range[1] - self.input_range[0]))

        # Add batch dim if needed.
        if len(seed_input.shape) != len(desired_shape):
            seed_input = np.expand_dims(seed_input, 0)

        # Only possible if channel idx is out of place.
        if seed_input.shape != desired_shape:
            seed_input = np.moveaxis(seed_input, -1, 1)
        return seed_input.astype(K.floatx()) 
Example 13
Project: keras_mixnets   Author: titu1994   File: custom_objects.py    MIT License
def call(self, inputs, **kwargs):
        if len(self._layers) == 1:
            return self._layers[0](inputs)

        filters = K.int_shape(inputs)[self._channel_axis]
        splits = self._split_channels(filters, self.groups)
        x_splits = tf.split(inputs, splits, self._channel_axis)
        x_outputs = [c(x) for x, c in zip(x_splits, self._layers)]
        x = layers.concatenate(x_outputs, axis=self._channel_axis)
        return x 
Example 14
Project: keras_mixnets   Author: titu1994   File: mixnets.py    MIT License
def __call__(self, inputs):
        filters = K.int_shape(inputs)[self._channel_axis]
        grouped_op = GroupConvolution(filters, self.kernels, groups=len(self.kernels),
                                      type='depthwise_conv', conv_kwargs=self._conv_kwargs)
        x = grouped_op(inputs)
        return x


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/mixnet_model.py 
Example 15
Project: Scene-Understanding   Author: foamliu   File: unpooling_layer.py    MIT License
def call(self, inputs, **kwargs):
        x = inputs[:, 1]
        # print('x.shape: ' + str(K.int_shape(x)))
        bool_mask = Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]),
                           output_shape=K.int_shape(x)[1:])(inputs)
        # print('bool_mask.shape: ' + str(K.int_shape(bool_mask)))
        mask = Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
        # print('mask.shape: ' + str(K.int_shape(mask)))
        x = Multiply()([mask, x])
        # print('x.shape: ' + str(K.int_shape(x)))
        return x 
Example 16
Project: smach_based_introspection_framework   Author: birlrobotics   File: layer_utils.py    BSD 3-Clause "New" or "Revised" License
def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # append the input as well for use later
        constants.append(inputs)
        return constants 
Example 17
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0
def createModel_schluter_valid(input, num_filter, height_filter, width_filter, filter_density, pool_n_row,
                pool_n_col, dropout):
    """
    original Schluter relu activation, no dropout
    :param input:
    :param num_filter:
    :param height_filter:
    :param width_filter:
    :param filter_density:
    :param pool_n_row:
    :param pool_n_col:
    :param dropout:
    :return:
    """

    x = ZeroPadding2D(padding=(0, int(width_filter/2)),  data_format="channels_first")(input)

    x = Conv2D(int(num_filter * filter_density), (height_filter, width_filter), padding="valid",
                       data_format="channels_first",
                       activation='relu')(x)

    output_shape = K.int_shape(x)

    if pool_n_row == 'all' and pool_n_col == 'all':
        x = MaxPooling2D(pool_size=(output_shape[2], output_shape[3]), padding='same', data_format="channels_first")(x)
    elif pool_n_row == 'all' and pool_n_col != 'all':
        x = MaxPooling2D(pool_size=(output_shape[2], pool_n_col), padding='same', data_format="channels_first")(x)
    elif pool_n_row != 'all' and pool_n_col == 'all':
        x = MaxPooling2D(pool_size=(pool_n_row, output_shape[3]), padding='same', data_format="channels_first")(x)
    else:
        x = MaxPooling2D(pool_size=(pool_n_row, pool_n_col), padding='same', data_format="channels_first")(x)
    x = Dropout(dropout)(x)
    x = Flatten()(x)

    return x 
Example 18
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0
def conv_module(conv, input_shape, input):
    if conv:
        x = Reshape((-1, input_shape[2]) + (1,))(input)
        # x = BatchNormalization()(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(1, 3))(x)

        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(1, 3))(x)
        shape = K.int_shape(x)
        x = Reshape((-1, shape[2] * shape[3]))(x)
    else:
        x = input
    return x 
Example 19
Project: gandlf   Author: codekansas   File: losses.py    MIT License
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
    """Generative moment matching loss with RBF kernel.

    Reference: https://arxiv.org/abs/1502.02761
    """

    warnings.warn('Moment matching loss is still in development.')

    if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
        raise ValueError('RBF Moment Matching function currently only works '
                         'for outputs with shape (batch_size, num_features).'
                         'Got y_true="%s" and y_pred="%s".' %
                         (str(K.int_shape(y_pred)), str(K.int_shape(y_true))))

    sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]

    x = K.concatenate([y_pred, y_true], 0)

    # Performs dot product between all combinations of rows in X.
    xx = K.dot(x, K.transpose(x))  # (batch_size, batch_size)

    # Performs dot product of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, 1)

    # Gets exponent entries of the RBF kernel (without sigmas).
    exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)

    # Applies all the sigmas.
    total_loss = None
    for sigma in sigmas:
        kernel_val = K.exp(exponent / sigma)
        loss = K.sum(kernel_val)
        total_loss = loss if total_loss is None else loss + total_loss

    return total_loss 
Example 20
Project: gandlf   Author: codekansas   File: attention.py    MIT License
def __init__(self, layer, attention, attn_activation='tanh',
                 attn_gate_func='sigmoid', W_regularizer=None,
                 b_regularizer=None, **kwargs):

        if not isinstance(layer, keras.layers.Recurrent):
            raise ValueError('The RecurrentAttention wrapper only works on '
                             'recurrent layers.')

        # Should know this so that we can handle multiple hidden states.
        self._wraps_lstm = isinstance(layer, keras.layers.LSTM)

        if not hasattr(attention, '_keras_shape'):
            raise ValueError('Attention should be a Keras tensor.')

        if len(K.int_shape(attention)) != 2:
            raise ValueError('The attention input for RecurrentAttention1D '
                             'should be a tensor with shape (batch_size, '
                             'num_features). Got shape=%s.' %
                             str(K.int_shape(attention)))

        self.supports_masking = True
        self.attention = attention
        self.attn_activation = keras.activations.get(attn_activation)
        self.attn_gate_func = keras.activations.get(attn_gate_func)

        self.W_regularizer = keras.regularizers.get(W_regularizer)
        self.b_regularizer = keras.regularizers.get(b_regularizer)

        super(RecurrentAttention1D, self).__init__(layer, **kwargs) 
Example 21
Project: gandlf   Author: codekansas   File: attention.py    MIT License
def __init__(self, layer, attention, time_dist_activation='softmax',
                 attn_gate_func='sigmoid', W_regularizer=None,
                 b_regularizer=None, **kwargs):

        if not isinstance(layer, keras.layers.Recurrent):
            raise ValueError('The RecurrentAttention wrapper only works on '
                             'recurrent layers.')

        # Should know this so that we can handle multiple hidden states.
        self._wraps_lstm = isinstance(layer, keras.layers.LSTM)

        if not hasattr(attention, '_keras_shape'):
            raise ValueError('Attention should be a Keras tensor.')

        if len(K.int_shape(attention)) != 3:
            raise ValueError('The attention input for RecurrentAttention2D '
                             'should be a tensor with shape (batch_size, '
                             'num_timesteps, num_features). Got shape=%s.' %
                             str(K.int_shape(attention)))

        self.supports_masking = True
        self.attention = attention

        self.time_dist_activation = keras.activations.get(time_dist_activation)
        self.attn_gate_func = keras.activations.get(attn_gate_func)

        self.W_regularizer = keras.regularizers.get(W_regularizer)
        self.b_regularizer = keras.regularizers.get(b_regularizer)

        super(RecurrentAttention2D, self).__init__(layer, **kwargs) 
Example 22
Project: gandlf   Author: codekansas   File: attention.py    MIT License
def build(self, input_shape):
        assert len(input_shape) >= 3
        self.input_spec = [keras.engine.InputSpec(shape=input_shape)]

        # Builds the wrapped layer.
        if not self.layer.built:
            self.layer.build(input_shape)

        super(RecurrentAttention2D, self).build()

        num_attn_timesteps, num_attn_feats = K.int_shape(self.attention)[1:]
        output_dim = self.layer.output_dim

        self.attn_U_t = self.add_weight((output_dim, num_attn_timesteps),
                                   initializer=self.layer.inner_init,
                                   name='{}_attn_U_t'.format(self.name),
                                   regularizer=self.W_regularizer)
        self.attn_b_t = self.add_weight((num_attn_timesteps,),
                                   initializer='zero',
                                   name='{}_attn_b_t'.format(self.name),
                                   regularizer=self.b_regularizer)

        self.attn_U_a = self.add_weight((num_attn_feats, output_dim),
                                   initializer=self.layer.inner_init,
                                   name='{}_attn_U_a'.format(self.name),
                                   regularizer=self.W_regularizer)
        self.attn_b_a = self.add_weight((output_dim,),
                                   initializer='zero',
                                   name='{}_attn_b_a'.format(self.name),
                                   regularizer=self.b_regularizer)

        self.trainable_weights = [self.attn_U_t, self.attn_b_t,
                                  self.attn_U_a, self.attn_b_a] 
Example 23
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License
def call(self, x, mask=None):
        input_shape = K.int_shape(x)

        features_dim = self.features_dim
        # step_dim = self.step_dim
        step_dim = input_shape[1]

        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))

        if self.bias:
            eij += self.b[:input_shape[1]]

        eij = K.tanh(eij)

        a = K.exp(eij)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        # print(weighted_input.shape)
        return K.sum(weighted_input, axis=1) 
Example 24
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License
def call(self, inputs, mask=None):
        en = inputs[0]
        de = inputs[1]
        de_shape = K.int_shape(de)
        step_dim = de_shape[1]

        hid_en = K.dot(en, self.W_en1)
        hid_de = K.dot(de, self.W_en2)
        if self.bias:
            hid_en += self.b_en1
            hid_de += self.b_en2
        hid = K.tanh(K.expand_dims(hid_en, axis=1) + hid_de)
        eij = K.reshape(K.dot(hid, K.reshape(self.W_de, (self.hid_size, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b_de[:step_dim]

        a = K.exp(eij - K.max(eij, axis=-1, keepdims=True))

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask[1], K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = de * a
        return K.sum(weighted_input, axis=1) 
Example 25
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def sparse_accuracy(y_true, y_pred):
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels)) 
Example 26
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 27
Project: FasterRCNN_KERAS   Author: akshaylamba   File: FixedBatchNormalization.py    Apache License 2.0
def call(self, x, mask=None):

        assert self.built, 'Layer must be built before being called'
        input_shape = K.int_shape(x)

        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
            x_normed = K.batch_normalization(
                x, self.running_mean, self.running_std,
                self.beta, self.gamma,
                epsilon=self.epsilon)
        else:
            # need broadcasting
            broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
            broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            x_normed = K.batch_normalization(
                x, broadcast_running_mean, broadcast_running_std,
                broadcast_beta, broadcast_gamma,
                epsilon=self.epsilon)

        return x_normed 
Example 28
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License
def call(self, inputs):
        output_part = []
        manifold_size = K.int_shape(inputs)[-1] // len(self.r)

        for idx, r_ in enumerate(self.r):
            start = idx * manifold_size
            stop = start + manifold_size
            part = inputs[..., start:stop]
            sign = np.sign(r_)
            if sign == 0.:
                # This is weird but necessary to make the layer differentiable
                output_pre = K.sum(inputs, -1, keepdims=True) * 0. + 1.
            else:
                free_components = part[..., :-1] ** 2
                bound_component = sign * part[..., -1:] ** 2
                all_components = K.concatenate((free_components, bound_component), -1)
                ext_product = K.sum(all_components, -1, keepdims=True)
                output_pre = K.exp(-(ext_product - sign * r_ ** 2) ** 2 / (2 * self.sigma ** 2))

            output_part.append(output_pre)

        if len(output_part) >= 2:
            if self.mode == 'average':
                output = Average()(output_part)
            elif self.mode == 'concat':
                output = Concatenate()(output_part)
            else:
                raise ValueError()  # Never gets here
        else:
            output = output_part[0]

        return output 
Example 29
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss 
Example 30
Project: BlurbGenreCollection-HMC   Author: uhh-lt   File: capsulelayers.py    Apache License 2.0
def build(self, input_shape):
        assert len(input_shape) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]"
        self.input_num_capsule = input_shape[1]
        self.input_dim_capsule = input_shape[2]

        # Transform matrix
        self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,
                                        self.dim_capsule, self.input_dim_capsule],
                                 initializer=self.kernel_initializer,
                                 name='W')

        print(K.int_shape(self.W))
        print(self.W)
        print(self.W[0][0][0][0])
        self.built = True 
Example 31
Project: BlurbGenreCollection-HMC   Author: uhh-lt   File: capsulelayers.py    Apache License 2.0
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding, name):
    # shape = K.int_shape(inputs)
    # output = layers.Reshape(target_shape = [shape[1],shape[3],shape[2]], name = 'conv_output')(inputs)
    output = layers.Conv1D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding,  kernel_initializer='normal', kernel_constraint=max_norm(3), activation='relu',
                           name=name)(inputs)
    #dropout = layers.Dropout(0.2)(output)
    outputs = layers.Reshape(target_shape=[-1, dim_capsule], name=name + '_reshape')(output)
    return layers.Lambda(squash, name= name + '_squash')(outputs) 
Example 32
Project: keras-attention-augmented-convs   Author: titu1994   File: attn_augconv.py    MIT License
def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8, relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for `k`.
            If passed as float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for `v`.
            If passed as float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)

    # Augmented Attention Block
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads, relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)

    output = concatenate([conv_out, attn_out], axis=channel_axis)
    return output 
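For reference, the docstring above implies that _normalize_depth_vars turns fractional depth_k/depth_v values into absolute filter counts. A plausible sketch of that helper, assuming exactly the float-to-int behaviour the docstring describes (an illustration, not the project's verified code):

def _normalize_depth_vars(depth_k, depth_v, filters):
    # Fractional values are interpreted relative to the total filter count.
    if isinstance(depth_k, float):
        depth_k = int(filters * depth_k)
    if isinstance(depth_v, float):
        depth_v = int(filters * depth_v)
    return depth_k, depth_v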
Example 33
Project: trVAE   Author: theislab   File: _utils.py    MIT License
def sample_z(args):
    """
        Samples from standard Normal distribution with shape [size, z_dim] and
        applies re-parametrization trick. It is actually sampling from latent
        space distributions with N(mu, var) computed in `_encoder` function.
        # Parameters
            args: (tensor, tensor) mu and log_var computed in `_encoder`.
        # Returns
            The computed Tensor of samples with shape [size, z_dim].
    """
    mu, log_var = args
    batch_size = K.shape(mu)[0]
    z_dim = K.int_shape(mu)[1]
    eps = K.random_normal(shape=[batch_size, z_dim])
    return mu + K.exp(log_var / 2) * eps 
Example 34
Project: cbc_networks   Author: saralajew   File: detection_probability_functions.py    BSD 3-Clause "New" or "Revised" License
def call(self, inputs, **kwargs):
        def sqrt(x):
            return K.sqrt(K.maximum(x, K.epsilon()))

        # Both components and input given
        if isinstance(inputs, list) and len(inputs) > 1:
            signals, kernel = inputs
        else:
            signals = inputs
            kernel = self.components.astype(K.floatx())

        # move component_number to channel dimension
        kernel = K.permute_dimensions(kernel, (1, 2, 3, 0))
        # normalize kernel
        normed_kernel = kernel / sqrt(K.sum(K.square(kernel),
                                            (0, 1, 2),
                                            keepdims=True))

        # get norm of signals
        signals_norm = sqrt(K.conv2d(K.square(signals),
                                     np.ones(K.int_shape(kernel)[:3] + (1,),
                                             dtype=K.floatx()),
                                     strides=self.strides,
                                     padding=self.padding,
                                     data_format='channels_last',
                                     dilation_rate=self.dilation_rate))

        diss = K.conv2d(signals,
                        normed_kernel,
                        strides=self.strides,
                        padding=self.padding,
                        data_format='channels_last',
                        dilation_rate=self.dilation_rate) / signals_norm

        if self.n_replicas != 1:
            shape = K.int_shape(diss)
            diss = K.reshape(diss, (-1, shape[1], shape[2],
                                    shape[3] // self.n_replicas,
                                    self.n_replicas))

        return self.activation(diss) 
Example 35
Project: cbc_networks   Author: saralajew   File: detection_probability_functions.py    BSD 3-Clause "New" or "Revised" License
def call(self, inputs, **kwargs):
        # Both components and input given
        if isinstance(inputs, list) and len(inputs) > 1:
            signals, kernel = inputs
        else:
            signals = inputs
            kernel = self.components.astype(K.floatx())

        # move component_number to channel dimension
        kernel = K.permute_dimensions(kernel, (1, 2, 3, 0))
        # normalize kernel
        kernel_norm = K.sum(K.square(kernel), (0, 1, 2), keepdims=True)

        # get norm of signals
        signals_norm = K.conv2d(K.square(signals),
                                K.ones_like(kernel),
                                strides=self.strides,
                                padding=self.padding,
                                data_format='channels_last',
                                dilation_rate=self.dilation_rate)

        diss = kernel_norm \
               - 2 * K.conv2d(signals,
                              kernel,
                              strides=self.strides,
                              padding=self.padding,
                              data_format='channels_last',
                              dilation_rate=self.dilation_rate) \
               + signals_norm

        if self.n_replicas != 1:
            shape = K.int_shape(diss)
            diss = K.reshape(diss, (-1, shape[1], shape[2],
                                    shape[3] // self.n_replicas,
                                    self.n_replicas))

        return self.activation(diss) 
Example 36
Project: df   Author: dfaker   File: pixel_shuffler.py    Mozilla Public License 2.0
def call(self, inputs):

        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, c, h, w = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, oc, oh, ow))
            return out

        elif self.data_format == 'channels_last':
            batch_size, h, w, c = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, oh, ow, oc))
            return out 
Example 37
Project: df   Author: dfaker   File: exampleTrainer.py    Mozilla Public License 2.0
def call(self, inputs):

        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, c, h, w = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, oc, oh, ow))
            return out

        elif self.data_format == 'channels_last':
            batch_size, h, w, c = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, oh, ow, oc))
            return out 
Example 38
Project: AIX360   Author: IBM   File: classifiers.py    Apache License 2.0
def __init__(self, model, input_layer=0, output_layer=0):

        """Initialize KerasClassifier.

        Args:
            model: a trained keras classifier model.
        """

        import keras.backend as k

        super(KerasClassifier, self).__init__()

        self._model = model

        if hasattr(model, 'inputs'):
            self._input = model.inputs[input_layer]
        else:
            self._input = model.input

        if hasattr(model, 'outputs'):
            self._output = model.outputs[output_layer]
        else:
            self._output = model.output

        _, self._nb_classes = k.int_shape(self._output)
        self._input_shape   = k.int_shape(self._input)[1:] 
Example 39
Project: labelImg   Author: keyuncheng   File: model.py    MIT License
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    loss = K.reshape(loss, [1, 1])
    return loss 
Example 40
Project: keras_nade   Author: jgrnt   File: orderless_nade.py    BSD 3-Clause "New" or "Revised" License
def total_masked_logdensity(inputs):
    component_logdensity, mask = inputs
    D = K.expand_dims(K.constant(K.int_shape(mask)[1]), 0)
    total = K.sum(K.abs(mask - 1) * component_logdensity, axis=1) * D / (D - K.sum(mask, axis=1))
    return total 
Example 41
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0
def call(self, inputs):

        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank ' +
                             str(4) +
                             '; Received input shape:', str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, c, h, w = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, oc, oh, ow))
            return out

        elif self.data_format == 'channels_last':
            batch_size, h, w, c = input_shape
            if batch_size is None:
                batch_size = -1
            rh, rw = self.size
            oh, ow = h * rh, w * rw
            oc = c // (rh * rw)

            out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, oh, ow, oc))
            return out 
Example 42
Project: Mask-R-CNN-sports-action-fine-tuing   Author: adelmassimo   File: model.py    MIT License
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    loss = K.reshape(loss, [1, 1])
    return loss 
Example 43
Project: backdoor   Author: bolunwang   File: visualizer.py    MIT License
def reset_opt(self):

        K.set_value(self.opt.iterations, 0)
        for w in self.opt.weights:
            K.set_value(w, np.zeros(K.int_shape(w)))

        pass 
Example 44
Project: PanopticSegmentation   Author: dmechea   File: model.py    MIT License
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss 
Example 45
Project: spektral   Author: danielegrattarola   File: base.py    MIT License
def call(self, inputs):
        F = K.int_shape(inputs)[-1]
        minkowski_prod_mat = np.eye(F)
        minkowski_prod_mat[-1, -1] = -1.
        minkowski_prod_mat = K.constant(minkowski_prod_mat)
        output = K.dot(inputs, minkowski_prod_mat)
        output = K.dot(output, K.transpose(inputs))
        output = K.clip(output, -10e9, -1.)

        if self.activation is not None:
            output = self.activation(output)

        return output 
Example 46
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License
def dense_layer(self,
                    x,
                    units,
                    name,
                    activation=None,
                    use_bias=True,
                    kernel_initializer='glorot_uniform',
                    bias_initializer='zeros',
                    kernel_regularizer=None,
                    bias_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None):
        input_dim = K.int_shape(x)[-1]
        kernel = self.add_weight(shape=(input_dim, units),
                                 name=name + '_kernel',
                                 initializer=kernel_initializer,
                                 regularizer=kernel_regularizer,
                                 constraint=kernel_constraint)
        bias = self.add_weight(shape=(units,),
                               name=name + '_bias',
                               initializer=bias_initializer,
                               regularizer=bias_regularizer,
                               constraint=bias_constraint)
        act = activations.get(activation)
        output = K.dot(x, kernel)
        if use_bias:
            output = K.bias_add(output, bias)
        output = act(output)
        return output 
Example 47
Project: keras_extension   Author: k1414st   File: core_sparse.py    MIT License
def compute_output_shape(self, input_shape):
        if self._output_shape is None:
            # With TensorFlow or CNTK, we can infer the output shape directly:
            if K.backend() in ('tensorflow', 'cntk'):
                if isinstance(input_shape, list):
                    xs = [K.placeholder(shape=shape, sparse=is_sparse)
                          for shape, is_sparse in zip(input_shape, self._is_sparse)]
                    x = self.call(xs)
                else:
                    x = K.placeholder(shape=input_shape, sparse=self._is_sparse)
                    x = self.call(x)
                if isinstance(x, list):
                    return [K.int_shape(x_elem) for x_elem in x]
                else:
                    return K.int_shape(x)
            # Otherwise, we default to the input shape.
            warnings.warn('`output_shape` argument not specified for layer {} '
                          'and cannot be automatically inferred '
                          'with the Theano backend. '
                          'Defaulting to output shape `{}` '
                          '(same as input shape). '
                          'If the expected output shape is different, '
                          'specify it via the `output_shape` argument.'
                          .format(self.name, input_shape))
            return input_shape
        elif isinstance(self._output_shape, (tuple, list)):
            if isinstance(input_shape, list):
                num_samples = input_shape[0][0]
            else:
                num_samples = input_shape[0] if input_shape else None
            return (num_samples,) + tuple(self._output_shape)
        else:
            shape = self._output_shape(input_shape)
            if not isinstance(shape, (list, tuple)):
                raise ValueError('`output_shape` function must return a tuple or '
                                 'a list of tuples.')
            if isinstance(shape, list):
                if isinstance(shape[0], int) or shape[0] is None:
                    shape = tuple(shape)
            return shape 
Example 48
Project: keras_extension   Author: k1414st   File: core_sparse.py    MIT License
def compute_output_shape(self, input_shape):
        if self._output_shape is None:
            # With TensorFlow or CNTK, we can infer the output shape directly:
            if K.backend() in ('tensorflow', 'cntk'):
                if isinstance(input_shape, list):
                    xs = [K.placeholder(shape=shape, sparse=is_sparse)
                          for shape, is_sparse in zip(input_shape, self._is_sparse)]
                    x = self.call(xs)
                else:
                    x = K.placeholder(shape=input_shape, sparse=self._is_sparse)
                    x = self.call(x)
                if isinstance(x, list):
                    return [K.int_shape(x_elem) for x_elem in x]
                else:
                    return K.int_shape(x)
            # Otherwise, we default to the input shape.
            warnings.warn('`output_shape` argument not specified for layer {} '
                          'and cannot be automatically inferred '
                          'with the Theano backend. '
                          'Defaulting to output shape `{}` '
                          '(same as input shape). '
                          'If the expected output shape is different, '
                          'specify it via the `output_shape` argument.'
                          .format(self.name, input_shape))
            return input_shape
        elif isinstance(self._output_shape, (tuple, list)):
            if isinstance(input_shape, list):
                num_samples = input_shape[0][0]
            else:
                num_samples = input_shape[0] if input_shape else None
            return (num_samples,) + tuple(self._output_shape)
        else:
            shape = self._output_shape(input_shape)
            if not isinstance(shape, (list, tuple)):
                raise ValueError('`output_shape` function must return a tuple or '
                                 'a list of tuples.')
            if isinstance(shape, list):
                if isinstance(shape[0], int) or shape[0] is None:
                    shape = tuple(shape)
            return shape 
Example 49
Project: cdt-ccm-aae   Author: danielegrattarola   File: layers.py    MIT License
def call(self, inputs):
        output_part = []
        manifold_size = K.int_shape(inputs)[-1] // len(self.r)

        for idx, r_ in enumerate(self.r):
            start = idx * manifold_size
            stop = start + manifold_size
            part = inputs[..., start:stop]
            sign = np.sign(r_)
            if sign == 0.:
                # This is weird but necessary to make the layer differentiable
                output_pre = K.sum(inputs, -1, keepdims=True) * 0. + 1.
            else:
                free_components = part[..., :-1] ** 2
                bound_component = sign * part[..., -1:] ** 2
                all_components = K.concatenate((free_components, bound_component), -1)
                ext_product = K.sum(all_components, -1, keepdims=True)
                output_pre = K.exp(-(ext_product - sign * r_ ** 2) ** 2 / (2 * self.sigma ** 2))

            output_part.append(output_pre)

        if len(output_part) >= 2:
            if self.mode == 'average':
                output = Average()(output_part)
            elif self.mode == 'concat':
                output = Concatenate()(output_part)
            else:
                raise ValueError('`mode` must be "average" or "concat"')  # Never gets here
        else:
            output = output_part[0]

        return output 
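The slicing above depends on K.int_shape() returning a static channel count so the input can be split evenly across manifolds. A standalone sketch of just that split (the three-manifold setup is hypothetical):

import keras.backend as K

inputs = K.placeholder(shape=(None, 12))   # 3 manifolds x 4 coordinates each
manifold_size = K.int_shape(inputs)[-1] // 3
parts = [inputs[..., i * manifold_size:(i + 1) * manifold_size] for i in range(3)]
[K.int_shape(p) for p in parts]            # -> [(None, 4), (None, 4), (None, 4)]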
Example 50
Project: YouTubeCommenter   Author: HackerPoet   File: Generate.py    MIT License 5 votes vote down vote up
def new_sparse_categorical_accuracy(y_true, y_pred):
	y_pred_rank = ops.convert_to_tensor(y_pred).get_shape().ndims
	y_true_rank = ops.convert_to_tensor(y_true).get_shape().ndims
	# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
	if (y_true_rank is not None) and (y_pred_rank is not None) and (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
		y_true = array_ops.squeeze(y_true, [-1])
	y_pred = math_ops.argmax(y_pred, axis=-1)
	# If the predicted output and actual output types don't match, force cast them
	# to match.
	if K.dtype(y_pred) != K.dtype(y_true):
		y_pred = math_ops.cast(y_pred, K.dtype(y_true))
	return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())

# Load the model 
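A quick sanity check of the metric (made-up tensors; assumes the TensorFlow private-module imports ops, array_ops, and math_ops used by the file above are in scope):

import keras.backend as K

y_true = K.constant([[1.], [0.]])               # shape (num_samples, 1)
y_pred = K.constant([[0.1, 0.9], [0.8, 0.2]])   # class 1, then class 0
acc = new_sparse_categorical_accuracy(y_true, y_pred)
K.eval(K.mean(acc))                             # -> 1.0: both rows predicted correctly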
Example 51
Project: YouTubeCommenter   Author: HackerPoet   File: Train.py    MIT License 5 votes vote down vote up
def new_sparse_categorical_accuracy(y_true, y_pred):
	y_pred_rank = ops.convert_to_tensor(y_pred).get_shape().ndims
	y_true_rank = ops.convert_to_tensor(y_true).get_shape().ndims
	# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
	if (y_true_rank is not None) and (y_pred_rank is not None) and (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
		y_true = array_ops.squeeze(y_true, [-1])
	y_pred = math_ops.argmax(y_pred, axis=-1)
	# If the predicted output and actual output types don't match, force cast them
	# to match.
	if K.dtype(y_pred) != K.dtype(y_true):
		y_pred = math_ops.cast(y_pred, K.dtype(y_true))
	return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())

# Build the training models 
Example 52
Project: humpback-whale-4th-place   Author: daustingm1   File: ensemble.py    Apache License 2.0 5 votes vote down vote up
def subblock(x, filter, **kwargs):
    x = BatchNormalization()(x)
    y = x
    y = Conv2D(filter, (1, 1), activation='relu', **kwargs)(y)  # Reduce the number of features to 'filter'
    y = BatchNormalization()(y)
    y = Conv2D(filter, (3, 3), activation='relu', **kwargs)(y)  # Extend the feature field
    y = BatchNormalization()(y)
    y = Conv2D(K.int_shape(x)[-1], (1, 1), **kwargs)(y)  # No activation: restore the original number of features
    y = Add()([x, y])  # Add the bypass connection
    y = Activation('relu')(y)
    return y 
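A minimal sketch of wiring subblock into a model (assumes the Keras layer imports from the file above; padding='same' is forwarded through **kwargs so the 3x3 conv keeps the spatial size required by the residual Add):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 64))
out = subblock(inp, 16, padding='same')  # bottleneck width 16, channels restored to 64
Model(inp, out).output_shape             # -> (None, 32, 32, 64)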
Example 53
Project: humpback-whale-4th-place   Author: daustingm1   File: siamese_train.py    Apache License 2.0 5 votes vote down vote up
def subblock(x, filter, **kwargs):
    x = BatchNormalization()(x)
    y = x
    y = Conv2D(filter, (1, 1), activation='relu', **kwargs)(y)  # Reduce the number of features to 'filter'
    y = BatchNormalization()(y)
    y = Conv2D(filter, (3, 3), activation='relu', **kwargs)(y)  # Extend the feature field
    y = BatchNormalization()(y)
    y = Conv2D(K.int_shape(x)[-1], (1, 1), **kwargs)(y)  # No activation: restore the original number of features
    y = Add()([x, y])  # Add the bypass connection
    y = Activation('relu')(y)
    return y 
Example 54
Project: humpback-whale-4th-place   Author: daustingm1   File: siamese_inference.py    Apache License 2.0 5 votes vote down vote up
def subblock(x, filter, **kwargs):
    x = BatchNormalization()(x)
    y = x
    y = Conv2D(filter, (1, 1), activation='relu', **kwargs)(y)  # Reduce the number of features to 'filter'
    y = BatchNormalization()(y)
    y = Conv2D(filter, (3, 3), activation='relu', **kwargs)(y)  # Extend the feature field
    y = BatchNormalization()(y)
    y = Conv2D(K.int_shape(x)[-1], (1, 1), **kwargs)(y)  # No activation: restore the original number of features
    y = Add()([x, y])  # Add the bypass connection
    y = Activation('relu')(y)
    return y 
Example 55
Project: dtc-pointpillars-keras   Author: dtczhl   File: model.py    MIT License 5 votes vote down vote up
def make_backbone(width, height, feature_C):
    x = Input(shape=(height, width, feature_C), name='input')

    y = rpn_conv_block(x, feature_C, 3, name='rpn_conv1')
    y_deconv1 = Conv2DTranspose(2*feature_C, 3, strides=1, padding='same', name='rpn_deconv1')(y)
    y = rpn_conv_block(y, 2*feature_C, 5, name='rpn_conv2')
    y_deconv2 = Conv2DTranspose(2*feature_C, 2, strides=2, padding='same', name='rpn_deconv2')(y)
    y = rpn_conv_block(y, 4*feature_C, 5, name='rpn_conv3')
    y = Conv2DTranspose(2*feature_C, 4, strides=4, padding='same', name='rpn_deconv3')(y)
    y = Concatenate()([y, y_deconv2, y_deconv1])

    print(K.int_shape(y))

    return Model(inputs=x, outputs=ssd_300(y)) 
Example 56
Project: image-processing-service   Author: aetrapp   File: FixedBatchNormalization.py    GNU General Public License v3.0 5 votes vote down vote up
def call(self, x, mask=None):

        assert self.built, 'Layer must be built before being called'
        input_shape = K.int_shape(x)

        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
            x_normed = K.batch_normalization(
                x, self.running_mean, self.running_std,
                self.beta, self.gamma,
                epsilon=self.epsilon)
        else:
            # need broadcasting
            broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
            broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            x_normed = K.batch_normalization(
                x, broadcast_running_mean, broadcast_running_std,
                broadcast_beta, broadcast_gamma,
                epsilon=self.epsilon)

        return x_normed 
Example 57
Project: BERT   Author: yyht   File: funcs.py    Apache License 2.0 5 votes vote down vote up
def shape_list(x):
    if K.backend() != 'theano':
        tmp = K.int_shape(x)
    else:
        tmp = x.shape
    tmp = list(tmp)
    tmp[0] = -1
    return tmp 
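In effect shape_list() is a reshape-friendly variant of K.int_shape(): the static shape as a list, with the batch dimension replaced by -1. For instance (TensorFlow backend assumed):

import keras.backend as K

x = K.placeholder(shape=(None, 128, 768))
shape_list(x)   # -> [-1, 128, 768], safe to pass to K.reshape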
Example 58
Project: BERT   Author: yyht   File: train.py    Apache License 2.0 5 votes vote down vote up
def sparse_gather(y_pred, target_indices, task_name):
    clf_h = Lambda(lambda x: K.reshape(x, (-1, K.int_shape(x)[-1])), name=task_name + '_flatten')(y_pred)
    return Lambda(lambda x: K.gather(x[0], K.cast(x[1], 'int32')), name=task_name + '_gather')([clf_h, target_indices]) 
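A hypothetical wiring of sparse_gather (assumes the Lambda and backend imports from the file above; shapes are illustrative): flatten (batch, seq, hidden) activations into rows, then pick out the rows named by flat indices.

from keras.layers import Input

h = Input(shape=(16, 64))                        # (batch, seq, hidden)
idx = Input(batch_shape=(None,), dtype='int32')  # flat row indices into (batch*seq, 64)
rows = sparse_gather(h, idx, 'demo')             # -> (?, 64) gathered hidden vectors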
Example 59
Project: smach_based_introspection_framework   Author: birlrobotics   File: layer_utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def _time_distributed_dense(x, w, b=None, dropout=None,
                            input_dim=None, output_dim=None,
                            timesteps=None, training=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.

    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not output_dim:
        output_dim = K.int_shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = K.reshape(x, (-1, timesteps, output_dim))
    return x 
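A minimal sketch of calling the helper directly (TensorFlow backend assumed); output_dim is inferred from the weight matrix via K.int_shape():

import numpy as np
import keras.backend as K

x = K.placeholder(shape=(None, 10, 8))   # (batch, timesteps, input_dim)
w = K.variable(np.zeros((8, 4)))         # weight matrix, so output_dim = 4
y = _time_distributed_dense(x, w)
K.int_shape(y)                           # -> (None, None, 4)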
Example 60
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License 4 votes vote down vote up
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        inputs, initial_state, constants = self._standardize_args(
            inputs, initial_state, constants, self._num_constants)

        if initial_state is None and constants is None:
            return super(ExternalAttentionRNNWrapper, self).__call__(inputs, **kwargs)

        # If any of `initial_state` or `constants` are specified and are Keras
        # tensors, then add them to the inputs and temporarily modify the
        # input_spec to include them.

        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = [InputSpec(shape=K.int_shape(state))
                               for state in initial_state]
            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [InputSpec(shape=K.int_shape(constant))
                                   for constant in constants]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # at this point additional_inputs cannot be empty
        is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
        for tensor in additional_inputs:
            if K.is_keras_tensor(tensor) != is_keras_tensor:
                raise ValueError('The initial state or constants of an ExternalAttentionRNNWrapper'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors'
                                 ' (a "Keras tensor" is a tensor that was'
                                 ' returned by a Keras layer, or by `Input`)')

        if is_keras_tensor:
            # Compute the full input spec, including state and constants
            full_input = inputs + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            # Perform the call with temporarily replaced input_spec
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(ExternalAttentionRNNWrapper, self).__call__(full_input, **kwargs)
            self.input_spec = self.input_spec[:len(original_input_spec)]
            return output
        else:
            return super(ExternalAttentionRNNWrapper, self).__call__(inputs, **kwargs) 
Example 61
Project: keras-minimal-rnn   Author: titu1994   File: minimal_rnn.py    MIT License 4 votes vote down vote up
def _time_distributed_dense(x, w, b=None, dropout=None,
                            input_dim=None, output_dim=None,
                            timesteps=None, training=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.

    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not output_dim:
        output_dim = K.int_shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = K.reshape(x, (-1, timesteps, output_dim))
    return x 
Example 62
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 4 votes vote down vote up
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers

    Returns:
        logits: [N, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox 
Example 63
Project: Sushi-dish-detection   Author: blackrubystudio   File: parallel_model.py    MIT License 4 votes vote down vote up
def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Slice inputs. Slice inputs on the CPU to avoid sending a copy
        # of the full inputs to all GPUs. Saves on bandwidth and memory.
        input_slices = {name: tf.split(x, self.gpu_count)
                        for name, x in zip(self.inner_model.input_names,
                                           self.inner_model.inputs)}

        output_names = self.inner_model.output_names
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])

        # Run the model call() on each GPU to place the ops there
        for i in range(self.gpu_count):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i):
                    # Run a slice of inputs through this replica
                    zipped_inputs = zip(self.inner_model.input_names,
                                        self.inner_model.inputs)
                    inputs = [
                        KL.Lambda(lambda s: input_slices[name][i],
                                  output_shape=lambda s: (None,) + s[1:])(tensor)
                        for name, tensor in zipped_inputs]
                    # Create the model replica and get the outputs
                    outputs = self.inner_model(inputs)
                    if not isinstance(outputs, list):
                        outputs = [outputs]
                    # Save the outputs for merging back together later
                    for l, o in enumerate(outputs):
                        outputs_all[l].append(o)

        # Merge outputs on CPU
        with tf.device('/cpu:0'):
            merged = []
            for outputs, name in zip(outputs_all, output_names):
                # Concatenate or average outputs?
                # Outputs usually have a batch dimension and we concatenate
                # across it. If they don't, then the output is likely a loss
                # or a metric value that gets averaged across the batch.
                # Keras expects losses and metrics to be scalars.
                if K.int_shape(outputs[0]) == ():
                    # Average
                    m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)
                else:
                    # Concatenate
                    m = KL.Concatenate(axis=0, name=name)(outputs)
                merged.append(m)
        return merged 
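The merge decision above hinges on K.int_shape() returning an empty tuple for scalars. Briefly:

import keras.backend as K

loss = K.variable(0.5)                    # a scalar loss/metric
K.int_shape(loss) == ()                   # -> True: tower outputs get averaged
batch = K.placeholder(shape=(None, 10))   # a batched output
K.int_shape(batch) == ()                  # -> False: tower outputs get concatenated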
Example 64
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def call(self, inputs, **kwargs):
        # decode the reasoning probabilities
        positive_kernel = self.reasoning_probabilities[0]
        negative_kernel = (1 - positive_kernel) * \
                          self.reasoning_probabilities[1]

        if self.use_component_probabilities:
            # squash component probabilities
            components_probabilities = softmax(self.component_probabilities,
                                               axis=2)

            positive_kernel = positive_kernel * components_probabilities
            negative_kernel = negative_kernel * components_probabilities

        # get normalization tensor
        # stabilize the division with a small epsilon
        normalization = K.sum(positive_kernel + negative_kernel,
                              axis=2,
                              keepdims=True) + K.epsilon()

        # get sliding kernel and bias
        if self.use_pixel_probabilities:
            pixel_probabilities = softmax(self.pixel_probabilities,
                                          axis=(0, 1))
            # scale kernel with priors
            kernel = (positive_kernel - negative_kernel) / normalization \
                     * pixel_probabilities
            bias = K.sum(negative_kernel / normalization
                         * pixel_probabilities,
                         axis=(0, 1, 2),
                         keepdims=True)
        else:
            kernel = (positive_kernel - negative_kernel) / normalization
            bias = K.sum(negative_kernel / normalization,
                         axis=(0, 1, 2),
                         keepdims=True)

        # compute probabilities by a sliding operation
        probs = K.conv2d(inputs, kernel,
                         strides=self.strides,
                         padding=self.padding,
                         data_format='channels_last',
                         dilation_rate=self.dilation_rate) + bias

        if not self.use_pixel_probabilities:
            # divide by the number of kernel elements
            probs = probs / np.prod(self.kernel_size)

        # reshape to m x n x #classes x #replicas
        probs = K.reshape(probs,
                          (-1,) + K.int_shape(probs)[1:3]
                          + (self.n_classes, self.n_replicas))

        # squeeze replica dimension if one.
        if self.n_replicas == 1:
            probs = K.squeeze(probs, axis=-1)

        return probs 
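The final reshape above splits the flat conv output back into class and replica dimensions using the static spatial sizes from K.int_shape(); in isolation (sizes hypothetical):

import keras.backend as K

probs = K.placeholder(shape=(None, 7, 7, 20))   # 10 classes x 2 replicas folded into 20 maps
probs = K.reshape(probs, (-1,) + K.int_shape(probs)[1:3] + (10, 2))
K.int_shape(probs)                              # -> (None, 7, 7, 10, 2)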
Example 65
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers.py    MIT License 4 votes vote down vote up
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        self.updates.append(K.update_add(self.t_cur, 1))

        lr = self.learning_rate
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))
        # momentum
        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(shape, name='moment_' + str(i))
                   for (i, shape) in enumerate(shapes)]
        self.weights = [self.iterations] + moments

        total_iterations = self.total_iterations
        # Cosine annealing
        if self.use_cosine_annealing and total_iterations != 0:
            self.eta_t = _compute_eta_t(self)
        self.lr_t = lr * self.eta_t  # for external tracking

        for p, g, m in zip(params, grads, moments):
            # Learning rate multipliers
            lr_t = self.learning_rate
            if self.lr_multipliers is not None:
                lr_t = _apply_lr_multiplier(self, lr_t, p)

            v = self.momentum * m - self.eta_t * lr_t * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                p_t = p + self.momentum * v - self.eta_t * lr_t * g
            else:
                p_t = p + v

            # Weight decays
            if p.name in self.weight_decays.keys() and total_iterations != 0:
                p_t = _apply_weight_decays(self, p, p_t)
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))

        self._init_notified = True
        return self.updates 
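The moment buffers above are allocated from the static parameter shapes; the pattern in isolation:

import numpy as np
import keras.backend as K

params = [K.variable(np.zeros((3, 2))), K.variable(np.zeros(4))]
shapes = [K.int_shape(p) for p in params]       # [(3, 2), (4,)]
moments = [K.zeros(shape) for shape in shapes]  # one zero buffer per parameter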
Example 66
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License 4 votes vote down vote up
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        self.updates.append(K.update_add(self.t_cur, 1))

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]

        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]
        self.weights = [self.iterations] + ms + vs + vhats

        total_iterations = self.total_iterations
        # Cosine annealing
        if self.use_cosine_annealing and total_iterations != 0:
            self.eta_t = _compute_eta_t(self)
        self.lr_t = lr_t * self.eta_t  # for external tracking
        lr_t_premult = lr_t

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            # Learning rate multipliers
            if self.lr_multipliers is not None:
                lr_t = _apply_lr_multiplier(self, lr_t_premult, p)

            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)
                p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(K.update(vhat, vhat_t))
            else:
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            # Weight decays
            if p.name in self.weight_decays.keys() and total_iterations != 0:
                p_t = _apply_weight_decays(self, p, p_t)
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))

        self._init_notified = True
        return self.updates 
Example 67
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License 4 votes vote down vote up
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        self.updates.append(K.update_add(self.t_cur, 1))

        t = K.cast(self.iterations, K.floatx()) + 1

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (1. - 0.5 * (
            K.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (1. - 0.5 * (
            K.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        self.weights = [self.iterations] + ms + vs

        total_iterations = self.total_iterations
        # Cosine annealing
        if self.use_cosine_annealing and total_iterations != 0:
            self.eta_t = _compute_eta_t(self)
        self.lr_t = self.lr * self.eta_t  # for external tracking

        for p, g, m, v in zip(params, grads, ms, vs):
            # Learning rate multipliers
            lr_t = self.lr
            if self.lr_multipliers is not None:
                lr_t = _apply_lr_multiplier(self, lr_t, p)

            # the following equations given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
            v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t) * g_prime + (
                momentum_cache_t_1 * m_t_prime)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            p_t = p - self.eta_t * lr_t * m_t_bar / (
                    K.sqrt(v_t_prime) + self.epsilon)

            # Weight decays
            if p.name in self.weight_decays.keys() and total_iterations != 0:
                p_t = _apply_weight_decays(self, p, p_t)
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))

        self._init_notified = True
        return self.updates 
Example 68
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License 4 votes vote down vote up
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        self.updates.append(K.update_add(self.t_cur, 1))

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))
        # momentum
        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + moments

        total_iterations = self.total_iterations
        # Cosine annealing
        if self.use_cosine_annealing and total_iterations != 0:
            self.eta_t = _compute_eta_t(self)
        self.lr_t = lr * self.eta_t  # for external tracking

        for p, g, m in zip(params, grads, moments):
            # Learning rate multipliers
            lr_t = self.lr
            if self.lr_multipliers is not None:
                lr_t = _apply_lr_multiplier(self, lr_t, p)

            v = self.momentum * m - self.eta_t * lr_t * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                p_t = p + self.momentum * v - self.eta_t * lr_t * g
            else:
                p_t = p + v

            # Weight decays
            if p.name in self.weight_decays.keys() and total_iterations != 0:
                p_t = _apply_weight_decays(self, p, p_t)
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))

        self._init_notified = True
        return self.updates 
Example 69
Project: labelImg   Author: keyuncheng   File: model.py    MIT License 4 votes vote down vote up
def fpn_classifier_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns:
        logits: [N, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_classifier")([rois] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(axis=3),
                           name='mrcnn_class_bn2')(x)
    x = KL.Activation('relu')(x)

    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox 
Example 70
Project: labelImg   Author: keyuncheng   File: parallel_model.py    MIT License 4 votes vote down vote up
def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Slice inputs. Slice inputs on the CPU to avoid sending a copy
        # of the full inputs to all GPUs. Saves on bandwidth and memory.
        input_slices = {name: tf.split(x, self.gpu_count)
                        for name, x in zip(self.inner_model.input_names,
                                           self.inner_model.inputs)}

        output_names = self.inner_model.output_names
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])

        # Run the model call() on each GPU to place the ops there
        for i in range(self.gpu_count):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i):
                    # Run a slice of inputs through this replica
                    zipped_inputs = zip(self.inner_model.input_names,
                                        self.inner_model.inputs)
                    inputs = [
                        KL.Lambda(lambda s: input_slices[name][i],
                                  output_shape=lambda s: (None,) + s[1:])(tensor)
                        for name, tensor in zipped_inputs]
                    # Create the model replica and get the outputs
                    outputs = self.inner_model(inputs)
                    if not isinstance(outputs, list):
                        outputs = [outputs]
                    # Save the outputs for merging back together later
                    for l, o in enumerate(outputs):
                        outputs_all[l].append(o)

        # Merge outputs on CPU
        with tf.device('/cpu:0'):
            merged = []
            for outputs, name in zip(outputs_all, output_names):
                # If outputs are numbers without dimensions, add a batch dim.
                def add_dim(tensor):
                    """Add a dimension to tensors that don't have any."""
                    if K.int_shape(tensor) == ():
                        return KL.Lambda(lambda t: K.reshape(t, [1, 1]))(tensor)
                    return tensor
                outputs = list(map(add_dim, outputs))

                # Concatenate
                merged.append(KL.Concatenate(axis=0, name=name)(outputs))
        return merged 
Example 71
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 4 votes vote down vote up
def call(self, inputs, mask=None, initial_state=None, training=None):
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if initial_state is not None:
            if not isinstance(initial_state, (list, tuple)):
                initial_states = [initial_state]
            else:
                initial_states = list(initial_state)
        if isinstance(inputs, list):
            initial_states = inputs[1:]
            inputs = inputs[0]
        else:
            initial_states = self.get_initial_states(inputs)

        if len(initial_states) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_states)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        constants = self.get_constants(inputs, training=None)
        preprocessed_input = self.preprocess_input(inputs, training=None)
        h = initial_states[0]
        h += self.recurrent_activation(self.initial_attention)
        initial_states[0] = h
        last_output, outputs, states = K.rnn(self.step,
                                             preprocessed_input,
                                             initial_states,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.unroll,
                                             input_length=input_shape[1])
        return last_output
#         if self.stateful:
#             updates = []
#             for i in range(len(states)):
#                 updates.append((self.states[i], states[i]))
#             self.add_update(updates, inputs)

        # Properly set learning phase
#         if 0 < self.dropout + self.recurrent_dropout:
#             last_output._uses_learning_phase = True
#             outputs._uses_learning_phase = True

#         if self.return_sequences:
#             return outputs
#         else:
#             return last_output 
Example 72
Project: Mask-R-CNN-sports-action-fine-tuing   Author: adelmassimo   File: model.py    MIT License 4 votes vote down vote up
def fpn_classifier_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns:
        logits: [N, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_classifier")([rois] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(axis=3),
                           name='mrcnn_class_bn2')(x)
    x = KL.Activation('relu')(x)

    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox 
Example 73
Project: PanopticSegmentation   Author: dmechea   File: model.py    MIT License 4 votes vote down vote up
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers

    Returns:
        logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox 
Example 74
Project: PanopticSegmentation   Author: dmechea   File: parallel_model.py    MIT License 4 votes vote down vote up
def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Slice inputs. Slice inputs on the CPU to avoid sending a copy
        # of the full inputs to all GPUs. Saves on bandwidth and memory.
        input_slices = {name: tf.split(x, self.gpu_count)
                        for name, x in zip(self.inner_model.input_names,
                                           self.inner_model.inputs)}

        output_names = self.inner_model.output_names
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])

        # Run the model call() on each GPU to place the ops there
        for i in range(self.gpu_count):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i):
                    # Run a slice of inputs through this replica
                    zipped_inputs = zip(self.inner_model.input_names,
                                        self.inner_model.inputs)
                    inputs = [
                        KL.Lambda(lambda s: input_slices[name][i],
                                  output_shape=lambda s: (None,) + s[1:])(tensor)
                        for name, tensor in zipped_inputs]
                    # Create the model replica and get the outputs
                    outputs = self.inner_model(inputs)
                    if not isinstance(outputs, list):
                        outputs = [outputs]
                    # Save the outputs for merging back together later
                    for l, o in enumerate(outputs):
                        outputs_all[l].append(o)

        # Merge outputs on CPU
        with tf.device('/cpu:0'):
            merged = []
            for outputs, name in zip(outputs_all, output_names):
                # Concatenate or average outputs?
                # Outputs usually have a batch dimension and we concatenate
                # across it. If they don't, then the output is likely a loss
                # or a metric value that gets averaged across the batch.
                # Keras expects losses and metrics to be scalars.
                if K.int_shape(outputs[0]) == ():
                    # Average
                    m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)
                else:
                    # Concatenate
                    m = KL.Concatenate(axis=0, name=name)(outputs)
                merged.append(m)
        return merged 
Example 75
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        if len(inputs) == 3:
            X, A, I = inputs
            self.data_mode = 'graph'
        else:
            X, A = inputs
            I = tf.zeros(tf.shape(X)[:1], dtype=tf.int32)
            self.data_mode = 'single'
        if K.ndim(I) == 2:
            I = I[:, 0]

        A_is_sparse = K.is_sparse(A)

        # Get mask
        y = K.dot(X, K.l2_normalize(self.kernel))
        N = K.shape(X)[-2]
        indices = ops.segment_top_k(y[:, 0], I, self.ratio, self.top_k_var)
        mask = tf.scatter_nd(tf.expand_dims(indices, 1), tf.ones_like(indices), (N,))

        # Multiply X and y to make layer differentiable
        features = X * self.gating_op(y)

        axis = 0 if len(K.int_shape(A)) == 2 else 1  # Cannot use negative axis in tf.boolean_mask
        # Reduce X
        X_pooled = tf.boolean_mask(features, mask, axis=axis)

        # Compute A^2
        if A_is_sparse:
            A_dense = tf.sparse.to_dense(A)
        else:
            A_dense = A
        A_squared = K.dot(A, A_dense)

        # Reduce A
        A_pooled = tf.boolean_mask(A_squared, mask, axis=axis)
        A_pooled = tf.boolean_mask(A_pooled, mask, axis=axis + 1)
        if A_is_sparse:
            A_pooled = tf.contrib.layers.dense_to_sparse(A_pooled)

        output = [X_pooled, A_pooled]

        # Reduce I
        if self.data_mode == 'graph':
            I_pooled = tf.boolean_mask(I[:, None], mask)[:, 0]
            output.append(I_pooled)

        if self.return_mask:
            output.append(mask)

        return output 
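The rank check above distinguishes single mode from batch mode through K.int_shape():

import keras.backend as K

A_single = K.placeholder(shape=(None, None))   # single graph: (N, N)
A_batch = K.placeholder(shape=(None, 5, 5))    # batch of graphs: (batch, N, N)
0 if len(K.int_shape(A_single)) == 2 else 1    # -> 0: mask nodes on axis 0
0 if len(K.int_shape(A_batch)) == 2 else 1     # -> 1: mask nodes on axis 1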
Example 76
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        if len(inputs) == 3:
            X, A, I = inputs
            self.data_mode = 'graph'
        else:
            X, A = inputs
            I = tf.zeros(tf.shape(X)[:1], dtype=tf.int32)
            self.data_mode = 'single'
        if K.ndim(I) == 2:
            I = I[:, 0]

        A_is_sparse = K.is_sparse(A)

        # Get mask
        y = K.dot(X, self.kernel)
        y = filter_dot(A, y)
        N = K.shape(X)[-2]
        indices = ops.segment_top_k(y[:, 0], I, self.ratio, self.top_k_var)
        mask = tf.scatter_nd(tf.expand_dims(indices, 1), tf.ones_like(indices), (N,))

        # Multiply X and y to make layer differentiable
        features = X * self.gating_op(y)

        axis = 0 if len(K.int_shape(A)) == 2 else 1  # Cannot use negative axis in tf.boolean_mask
        # Reduce X
        X_pooled = tf.boolean_mask(features, mask, axis=axis)

        # Compute A^2
        if A_is_sparse:
            A_dense = tf.sparse.to_dense(A)
        else:
            A_dense = A
        A_squared = K.dot(A, A_dense)

        # Reduce A
        A_pooled = tf.boolean_mask(A_squared, mask, axis=axis)
        A_pooled = tf.boolean_mask(A_pooled, mask, axis=axis + 1)
        if A_is_sparse:
            A_pooled = tf.contrib.layers.dense_to_sparse(A_pooled)

        output = [X_pooled, A_pooled]

        # Reduce I
        if self.data_mode == 'graph':
            I_pooled = tf.boolean_mask(I[:, None], mask)[:, 0]
            output.append(I_pooled)

        if self.return_mask:
            output.append(mask)

        return output 
Example 77
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        X = inputs[0]  # (batch_size, N, F)
        A = inputs[1]  # (batch_size, N, N)
        E = inputs[2]  # (batch_size, N, N, S)

        mode = ops.autodetect_mode(A, X)

        # Parameters
        N = K.shape(X)[-2]
        F = K.int_shape(X)[-1]
        F_ = self.channels

        # Normalize adjacency matrix
        A = ops.normalize_A(A)

        # Filter network
        kernel_network = E
        if self.kernel_network is not None:
            for i, l in enumerate(self.kernel_network):
                kernel_network = self.dense_layer(kernel_network, l,
                                                  'FGN_{}'.format(i),
                                                  activation='relu',
                                                  use_bias=self.use_bias,
                                                  kernel_initializer=self.kernel_initializer,
                                                  bias_initializer=self.bias_initializer,
                                                  kernel_regularizer=self.kernel_regularizer,
                                                  bias_regularizer=self.bias_regularizer,
                                                  kernel_constraint=self.kernel_constraint,
                                                  bias_constraint=self.bias_constraint)
        kernel_network = self.dense_layer(kernel_network, F_ * F, 'FGN_out')

        # Convolution
        target_shape = (-1, N, N, F_, F) if mode == ops.modes['B'] else (N, N, F_, F)
        kernel = K.reshape(kernel_network, target_shape)
        output = kernel * A[..., None, None]

        if mode == ops.modes['B']:
            output = tf.einsum('abicf,aif->abc', output, X)
        else:
            output = tf.einsum('bicf,if->bc', output, X)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        return output 
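Note the deliberate mix above: N = K.shape(X)[-2] stays a dynamic tensor, while F = K.int_shape(X)[-1] must be a static Python int because it sets the units of the kernel network's output layer. Briefly:

import keras.backend as K

X = K.placeholder(shape=(None, None, 32))   # (batch, N, F) with N unknown
K.shape(X)[-2]       # a scalar tensor, resolved only at run time
K.int_shape(X)[-1]   # -> 32, a plain int usable as a layer size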
Example 78
Project: FaceLandmarks   Author: JACKYLUO1991   File: model.py    Apache License 2.0 4 votes vote down vote up
def _bottleneck(self, inputs, filters, kernel, e, s, squeeze, nl):
        """Bottleneck
        This function defines a basic bottleneck structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
            filters: Integer, the dimensionality of the output space.
            kernel: An integer or tuple/list of 2 integers, specifying the
                width and height of the 2D convolution window.
            e: Integer, expansion factor.
                e is always applied to the input size.
            s: An integer or tuple/list of 2 integers, specifying the strides
                of the convolution along the width and height. Can be a single
                integer to specify the same value for all spatial dimensions.
            squeeze: Boolean, whether to use squeeze-and-excitation.
            nl: String, nonlinearity activation type.

        # Returns
            Output tensor.
        """

        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        input_shape = K.int_shape(inputs)
        tchannel = e
        r = s == 1 and input_shape[3] == filters
        x = self._conv_block(inputs, tchannel, (1, 1), (1, 1), nl)

        x = DepthwiseConv2D(kernel, strides=(
            s, s), depth_multiplier=1, padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)

        if squeeze:
            # x = Lambda(lambda x: x * self._squeeze(x))(x)
            x = self._squeeze(x)

        x = self._return_activation(x, nl)

        x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)

        if r:
            x = Add()([x, inputs])

        return x 
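The residual condition r above compares the static channel count against the requested filters; in isolation (channels_last assumed):

import keras.backend as K

inputs = K.placeholder(shape=(None, 56, 56, 24))
filters, s = 24, 1
s == 1 and K.int_shape(inputs)[3] == filters   # -> True: the bypass Add is applied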
Example 79
Project: keras-image-segmentation   Author: dhkim0225   File: pspnet.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        input_shape = K.int_shape(inputs)
        if self.data_format == 'channels_first':
            input_height = input_shape[2]
            input_width = input_shape[3]
            target_height = self.target_shape[2]
            target_width = self.target_shape[3]
            if target_height > input_height or target_width > input_width:
                raise ValueError('The target shape must be smaller than '
                                 'or equal to the input Tensor shape.')

            if self.offset == 'centered':
                self.offset = [
                        int((input_height - target_height) / 2),
                        int((input_width - target_width) / 2)]

            if self.offset[0] + target_height > input_height:
                raise ValueError('Height index out of range: '
                                 + str(self.offset[0] + target_height))
            if self.offset[1] + target_width > input_width:
                raise ValueError('Width index out of range: '
                                 + str(self.offset[1] + target_width))

            return inputs[:,
                          :,
                          self.offset[0]:self.offset[0] + target_height,
                          self.offset[1]:self.offset[1] + target_width]
        elif self.data_format == 'channels_last':
            input_height = input_shape[1]
            input_width = input_shape[2]
            target_height = self.target_shape[1]
            target_width = self.target_shape[2]
            if target_height > input_height or target_width > input_width:
                raise ValueError('The target shape must be smaller than '
                                 'or equal to the input Tensor shape.')

            if self.offset == 'centered':
                self.offset = [int((input_height - target_height) / 2),
                               int((input_width - target_width) / 2)]

            if self.offset[0] + target_height > input_height:
                raise ValueError('Height index out of range: '
                                 + str(self.offset[0] + target_height))
            if self.offset[1] + target_width > input_width:
                raise ValueError('Width index out of range: '
                                 + str(self.offset[1] + target_width))
            output = inputs[
                    :,
                    self.offset[0]:self.offset[0] + target_height,
                    self.offset[1]:self.offset[1] + target_width,
                    :]
            return output 
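The centered offset above is plain integer arithmetic on the static shapes; a worked example with hypothetical sizes:

input_height, input_width = 10, 12     # from K.int_shape(inputs)
target_height, target_width = 6, 8     # from self.target_shape
offset = [(input_height - target_height) // 2,
          (input_width - target_width) // 2]   # -> [2, 2]
# channels_last crop: inputs[:, 2:2+6, 2:2+8, :]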
Example 80
Project: keras-image-segmentation   Author: dhkim0225   File: pspnet.py    MIT License 4 votes vote down vote up
def crop_deconv(
        classes,
        scale=1,
        kernel_size=(4, 4),
        strides=(2, 2),
        crop_offset='centered',
        weight_decay=0.,
        block_name='featx'):
    def f(x, y):
        def scaling(xx, ss=1):
            return xx * ss

        scaled = Lambda(
                scaling,
                arguments={'ss': scale},
                name='scale_{}'.format(block_name))(x)
        score = Conv2D(
                filters=classes,
                kernel_size=(1, 1),
                activation='linear',
                kernel_initializer='he_normal',
                kernel_regularizer=l2(weight_decay),
                name='score_{}'.format(block_name))(scaled)

        if y is None:
            upscore = Conv2DTranspose(
                    filters=classes,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding='valid',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(weight_decay),
                    use_bias=False,
                    name='upscore_{}'.format(block_name))(score)
        else:
            crop = CroppingLike2D(
                    target_shape=K.int_shape(y),
                    offset=crop_offset,
                    name='crop_{}'.format(block_name))(score)
            merge = add([y, crop])
            upscore = Conv2DTranspose(
                    filters=classes,
                    kernel_size=kernel_size,
                    strides=strides, padding='valid',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(weight_decay),
                    use_bias=False,
                    name='upscore_{}'.format(block_name))(merge)
        return upscore
    return f
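A hypothetical use of crop_deconv in a skip connection (shapes illustrative; assumes the CroppingLike2D layer and the Keras imports from the file above):

from keras.layers import Input

x = Input(shape=(36, 36, 256))   # coarse feature map
y = Input(shape=(34, 34, 21))    # score map from the previous stage
upscore = crop_deconv(classes=21, block_name='feat1')(x, y)
# score(x) -> (36, 36, 21), cropped to y's (34, 34, 21), added, then deconvolved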