Python keras.layers.Activation() Examples

The following code examples show how to use keras.layers.Activation(). They are drawn from open source Python projects.
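For orientation, here is a minimal sketch, written for this page rather than taken from any of the projects below, of the two equivalent ways to apply an activation in Keras: as a standalone Activation layer, or through the activation argument of another layer.

from keras.models import Sequential
from keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(64, input_dim=10))
model.add(Activation('relu'))               # standalone Activation layer
model.add(Dense(1, activation='sigmoid'))   # activation passed as an argument
model.compile(loss='binary_crossentropy', optimizer='adam')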

Example 1
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['ReLU']}
        # Test 1
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        temp = activation(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation')
        # Test 2
        net['l1']['params']['negative_slope'] = 1
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        temp = activation(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'LeakyReLU') 
Example 2
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License
def _get_logits_name(self):
        """
        Looks for the name of the layer producing the logits.
        :return: name of layer producing the logits
        """
        softmax_name = self._get_softmax_name()
        softmax_layer = self.model.get_layer(softmax_name)

        if not isinstance(softmax_layer, Activation):
            # In this case, the activation is part of another layer
            return softmax_name

        if hasattr(softmax_layer, 'inbound_nodes'):
            warnings.warn(
                "Please update your version to keras >= 2.1.3; "
                "support for earlier keras versions will be dropped on "
                "2018-07-22")
            node = softmax_layer.inbound_nodes[0]
        else:
            node = softmax_layer._inbound_nodes[0]

        logits_name = node.inbound_layers[0].name

        return logits_name 
Example 3
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License
def buildmodel(opt):
    print("Now we build the model")
    model = Sequential()
    model.add(Convolution2D(32, 8, 8, subsample=(4,4), border_mode='same',input_shape=(80,80,1)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2,2), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1,1), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(1))

    model.compile(loss='binary_crossentropy',optimizer=opt)
    print("We finish building the model")
    return model 
Example 4
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def CausalCNN(n_filters, lr, decay, loss, 
               seq_len, input_features, 
               strides_len, kernel_size,
               dilation_rates):

    inputs = Input(shape=(seq_len, input_features), name='input_layer')   
    x = inputs
    for dilation_rate in dilation_rates:
        x = Conv1D(filters=n_filters,
               kernel_size=kernel_size, 
               padding='causal',
               dilation_rate=dilation_rate,
               activation='linear')(x) 
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    #x = Dense(7, activation='relu', name='dense_layer')(x)
    outputs = Dense(3, activation='sigmoid', name='output_layer')(x)
    causalcnn = Model(inputs, outputs=[outputs])

    return causalcnn 
Example 5
Project: ANN   Author: waynezv   File: ANN_v0.1.py    MIT License
def train(in_dim, out_dim, X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(100000, input_dim = in_dim, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(100000, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(out_dim, init='uniform'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd',\
            metrics=['accuracy'])

    hist = model.fit(X_train, Y_train, nb_epoch=5, batch_size=32,\
            validation_split=0.1, shuffle=True)
    print(hist.history)

    loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)

    classes = model.predict_classes(X_test, batch_size=32)

    proba = model.predict_proba(X_test, batch_size=32) 
Example 6
Project: swishnet   Author: i7p9h9   File: swishnet.py    MIT License
def __causal_gated_conv1D(x=None, filters=16, length=6, strides=1):
    def causal_gated_conv1D(x, filters, length, strides):
        x_in_1 = layers.Conv1D(filters=filters // 2,
                               kernel_size=length,
                               dilation_rate=strides,  # intentional: dilation_rate is used instead of strides for shape matching
                               strides=1,
                               padding="causal")(x)
        x_sigmoid = layers.Activation(activation="sigmoid")(x_in_1)

        x_in_2 = layers.Conv1D(filters=filters // 2,
                               kernel_size=length,
                               dilation_rate=strides,  # intentional: dilation_rate is used instead of strides for shape matching
                               strides=1,
                               padding="causal")(x)
        x_tanh = layers.Activation(activation="tanh")(x_in_2)

        x_out = layers.Multiply()([x_sigmoid, x_tanh])

        return x_out

    if x is None:
        return lambda _x: causal_gated_conv1D(x=_x, filters=filters, length=length, strides=strides)
    else:
        return causal_gated_conv1D(x=x, filters=filters, length=length, strides=strides) 
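Because the builder returns a closure when x is None, it can be used either as a layer-like factory or called directly on a tensor. A short hypothetical usage sketch (shapes and filter counts are illustrative; layers is keras.layers as in the snippet above):

# Hypothetical usage within the same module (illustrative shapes):
inputs = layers.Input(shape=(64, 16))
x = __causal_gated_conv1D(filters=16, length=4)(inputs)  # curried form
x = __causal_gated_conv1D(x=x, filters=32, length=4)     # direct form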
Example 7
Project: wdcnn_bearning_fault_diagnosis-master   Author: yfshich   File: main.py    MIT License
def wdcnn(filters, kernerl_size, strides, conv_padding, pool_padding,  pool_size, BatchNormal):
    """wdcnn层神经元

    :param filters: 卷积核的数目,整数
    :param kernerl_size: 卷积核的尺寸,整数
    :param strides: 步长,整数
    :param conv_padding: 'same','valid'
    :param pool_padding: 'same','valid'
    :param pool_size: 池化层核尺寸,整数
    :param BatchNormal: 是否Batchnormal,布尔值
    :return: model
    """
    model.add(Conv1D(filters=filters, kernel_size=kernerl_size, strides=strides,
                     padding=conv_padding, kernel_regularizer=l2(1e-4)))
    if BatchNormal:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))
    return model

# Instantiate the Sequential model
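Note that wdcnn() appends layers to a module-level model rather than creating one. A minimal sketch of the scaffolding the script presumably sets up before calling it (layer sizes here are illustrative guesses, not taken from the project):

# Assumed module-level Sequential model; the first block fixes the input shape.
model = Sequential()
model.add(Conv1D(filters=16, kernel_size=64, strides=16, padding='same',
                 kernel_regularizer=l2(1e-4), input_shape=(2048, 1)))
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_size=2))
# Subsequent blocks can then be stacked via wdcnn():
model = wdcnn(filters=32, kernerl_size=3, strides=1, conv_padding='same',
              pool_padding='valid', pool_size=2, BatchNormal=True)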
Example 8
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gap_keras.py    MIT License
def GAP():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (7, 7), padding='valid', strides=2, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', strides=2, activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    # GAP
    x = Conv2D(num_classes, (1, 1), padding='same', activation=None, name='out')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 9
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: lenet_keras.py    MIT License
def LeNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(6, (5, 5), padding='valid', activation=None, name='conv1')(inputs)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Activation('sigmoid')(x)
    x = Conv2D(16, (5, 5), padding='valid', activation=None, name='conv2')(x)
    x = MaxPooling2D((2,2), padding='same')(x)
    x = Activation('sigmoid')(x)
    
    x = Flatten()(x)
    x = Dense(120, name='dense1', activation=None)(x)
    x = Dense(64, name='dense2', activation=None)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 10
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nin_keras.py    MIT License
def NIN():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv1')(inputs)
    x = Conv2D(160, (1, 1), padding='same', strides=1, activation='relu', name='cccp1')(x)
    x = Conv2D(96, (1, 1), padding='same', strides=1, activation='relu', name='cccp2')(x)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv2')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp4')(x)
    x = AveragePooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (3, 3), padding='same', strides=1, activation='relu', name='conv3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp5')(x)
    x = Conv2D(num_classes, (1, 1), padding='same', strides=1, activation='relu', name='cccp6')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 11
Project: Keras-GAN   Author: eriklindernoren   File: sgan.py    MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 12
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img) 
Example 13
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 14
Project: Keras-GAN   Author: eriklindernoren   File: dcgan.py    MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 15
Project: Github-Classifier   Author: Ichaelus   File: descriptionfoldersreponamelstm.py    MIT License
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Description, foldernames and reponame LSTM", "A LSTM reading the description, foldernames and reponame character by character")

        hidden_size = 300
        self.maxlen = 300

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # At most self.maxlen characters are allowed, each as a one-hot-encoded array
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))

        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))

        model.add(Dense(self.output_size))  
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                    optimizer=SGD(),
                    metrics=['accuracy'])

        self.model = model
        print "\t-", self.name 
Example 16
Project: Github-Classifier   Author: Ichaelus   File: descriptionlstm.py    MIT License
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Description Only LSTM", "A LSTM reading the description character by character")

        hidden_size = 250
        self.maxlen = 100

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # At most self.maxlen characters are allowed, each as a one-hot-encoded array
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))

        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))

        model.add(Dense(self.output_size))  
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                    optimizer=SGD(),
                    metrics=['accuracy'])

        self.model = model
        print "\t-", self.name 
Example 17
Project: Github-Classifier   Author: Ichaelus   File: readmelstm.py    MIT License
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Readme Only LSTM", "A LSTM reading the Readme character by character.\
        The input is a matrix, each row standing for a character with each column representing a lowercase ASCII-Character, punctuation or a number.\
        Up to 2000 characters are fed into this net.\
        The first hidden layer is the LSTM-layer, the next 3 are standard and fully connected. The outputs are normalized using a softmax function.\
        The loss function is categorical crossentropy and we used Stochastic Gradient Descent for training.")

        hidden_size = 200
        self.maxlen = 2000 

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # At most self.maxlen characters are allowed, each as a one-hot-encoded array
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))

        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))

        model.add(Dense(self.output_size))  
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                    optimizer=SGD(),
                    metrics=['accuracy'])

        self.model = model
        print "\t-", self.name 
Example 18
Project: Github-Classifier   Author: Ichaelus   File: descriptionreponamelstm.py    MIT License
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character")

        hidden_size = 300
        self.maxlen = 300

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # At most self.maxlen characters are allowed, each as a one-hot-encoded array
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))

        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))

        model.add(Dense(self.output_size))  
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                    optimizer=SGD(),
                    metrics=['accuracy'])

        self.model = model
        print "\t-", self.name 
Example 19
Project: derplearning   Author: notkarol   File: line_train.py    MIT License
def create_model(input_shape, n_output, n_blocks=2):
    model = Sequential()
    model.add(Conv2D(96, (5, 5), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('elu'))
    model.add(MaxPooling2D(pool_size=2))

    for i in range(n_blocks):
        model.add(Conv2D(32, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('elu'))
        model.add(Conv2D(32, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('elu'))
        model.add(MaxPooling2D(pool_size=2))

    model.add(Flatten())
    model.add(Dense(100))
    model.add(Activation('elu'))
    model.add(Dense(n_output))
    
    return model 
Example 20
Project: speed_estimation   Author: NeilNie   File: i3d.py    MIT License
def conv3d_bath_norm(x, filters, num_frames, num_row, num_col, padding='same', strides=(1, 1, 1),
                         use_bias=False, use_activation_fn=True, use_bn=True, name=None):

        """

        :param x: input tensor.
        :param filters: filters in `Conv3D`.
        :param num_frames: frames (time depth) of the convolution kernel.
        :param num_row: height of the convolution kernel.
        :param num_col: width of the convolution kernel.
        :param padding: padding mode in `Conv3D`.
        :param strides: strides in `Conv3D`.
        :param use_bias: use bias or not
        :param use_activation_fn: use an activation function or not.
        :param use_bn: use batch normalization or not.
        :param name: name of the ops; will become `name + '_conv'`
                for the convolution and `name + '_bn'` for the
                batch norm layer.
        :return: Output tensor after applying `Conv3D` and `BatchNormalization`.
        """

        if name is not None:
            bn_name = name + '_bn'
            conv_name = name + '_conv'
        else:
            bn_name = None
            conv_name = None

        x = Conv3D(filters, (num_frames, num_row, num_col), strides=strides, padding=padding, use_bias=use_bias,
                   name=conv_name)(x)

        if use_bn:
            bn_axis = 4
            x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)

        if use_activation_fn:
            x = Activation('relu', name=name)(x)

        return x 
Example 21
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0
def activation(layer, layer_in, layerId, tensor=True):
    print("activation")
    print(str(layer))
    out = {}
    if (layer['info']['type'] == 'ReLU'):
        if ('negative_slope' in layer['params'] and layer['params']['negative_slope'] != 0):
            out[layerId] = LeakyReLU(alpha=layer['params']['negative_slope'])
        else:
            out[layerId] = Activation('relu')
    elif (layer['info']['type'] == 'PReLU'):
        out[layerId] = PReLU()
    elif (layer['info']['type'] == 'ELU'):
        out[layerId] = ELU(alpha=layer['params']['alpha'])
    elif (layer['info']['type'] == 'ThresholdedReLU'):
        out[layerId] = ThresholdedReLU(theta=layer['params']['theta'])
    elif (layer['info']['type'] == 'Sigmoid'):
        out[layerId] = Activation('sigmoid')
    elif (layer['info']['type'] == 'TanH'):
        out[layerId] = Activation('tanh')
    elif (layer['info']['type'] == 'Softmax'):
        out[layerId] = Activation('softmax')
    elif (layer['info']['type'] == 'SELU'):
        out[layerId] = Activation('selu')
    elif (layer['info']['type'] == 'Softplus'):
        out[layerId] = Activation('softplus')
    elif (layer['info']['type'] == 'Softsign'):
        out[layerId] = Activation('softsign')
    elif (layer['info']['type'] == 'HardSigmoid'):
        out[layerId] = Activation('hard_sigmoid')
    elif (layer['info']['type'] == 'Linear'):
        out[layerId] = Activation('linear')
    if tensor:
        print(layer_in)
        out[layerId] = out[layerId](*layer_in)
    return out 
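The dispatcher expects a layer spec shaped like the ones in the export tests elsewhere on this page. A hypothetical call (the spec and input tensor are illustrative; Input is assumed to come from keras.layers):

# Hypothetical usage (illustrative spec; Input from keras.layers):
layer = {'info': {'type': 'ReLU'}, 'params': {'negative_slope': 0.1}}
inp = Input(shape=(10,))
out = activation(layer, [inp], 'l1')  # out['l1'] is the LeakyReLU output tensor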
Example 22
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Sigmoid']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 23
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['TanH']}
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 24
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Softmax']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 25
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Softplus']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 26
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Softsign']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 27
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['HardSigmoid']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 28
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Linear']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 29
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License
def generate_model_3():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    x = Masking()(ip)
    x = LSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 30
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License
def generate_model_4():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 3
    #
    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    #y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 31
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def weather_conv1D(layers, lr, decay, loss, 
               input_len, input_features, 
               strides_len, kernel_size):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    for i, hidden_nums in enumerate(layers):
        if i==0:
            #inputs = BatchNormalization(name='BN_input')(inputs)
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len, 
                        data_format='channels_last', 
                        padding='same', activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        elif i<len(layers)-1:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last', 
                        padding='same',activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 
            hn = Activation('relu')(hn)
        else:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last', 
                        padding='same',activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 

    outputs = Dense(80, activation='relu', name='dense_layer')(hn)
    outputs = Dense(3, activation='tanh', name='output_layer')(outputs)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Example 32
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def weather_fnn(layers, lr,
            decay, loss, seq_len, 
            input_features, output_features):
    
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')
    #print(seq_len*input_features)
    conv_ = Conv1D(11, kernel_size=13, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    inputs = Reshape((-1,))(conv_)

    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        else:
            hn = Dense(hidden_nums, activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
            #hn = Dropout(0.1)(hn)
    #print(seq_len, output_features)
    #print(hn)
    outputs = Dense(seq_len*output_features, activation='sigmoid', name='output_layer')(hn) # 37*3
    outputs = Reshape((seq_len, output_features))(outputs)

    weather_fnn = Model(ori_inputs, outputs=[outputs])

    return weather_fnn 
Example 33
Project: Jetson-RaceCar-AI   Author: ardamavi   File: get_model.py    Apache License 2.0
def get_model():
    img_inputs = Input(shape=(500, 500, 1))
    lidar_inputs = Input(shape=(3,))

    conv_1 = Conv2D(32, (4,4), strides=(2,2))(img_inputs)

    conv_2 = Conv2D(32, (4,4), strides=(2,2))(conv_1)

    conv_3 = Conv2D(32, (3,3), strides=(1,1))(conv_2)
    act_3 = Activation('relu')(conv_3)

    pooling_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(act_3)

    flat_1 = Flatten()(pooling_1)

    fc = Dense(32)(flat_1)

    lidar_fc = Dense(32)(lidar_inputs)

    concatenate_layer = concatenate([fc, lidar_fc])

    fc = Dense(10)(concatenate_layer)
    fc = Activation('relu')(fc)
    fc = Dropout(0.5)(fc)

    outputs = Dense(2)(fc)

    outputs = Activation('sigmoid')(outputs)


    model = Model(inputs=[img_inputs, lidar_inputs], outputs=[outputs])

    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])

    print(model.summary())

    return model 
Example 34
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0
def conv_module(conv, input_shape, input):
    if conv:
        x = Reshape((-1, input_shape[2]) + (1,))(input)
        # x = BatchNormalization()(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=8, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(1, 3))(x)

        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = Conv2D(filters=16, kernel_size=(1, 3), activation="relu")(x)
        # x = BatchNormalization()(x)
        # x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(1, 3))(x)
        shape = K.int_shape(x)
        x = Reshape((-1, shape[2] * shape[3]))(x)
    else:
        x = input
    return x 
Example 35
Project: kaggle-carvana-2017   Author: killthekitten   File: inception_resnet_v2.py    MIT License
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        kernel_size: kernel size as in `Conv2D`.
        padding: padding mode in `Conv2D`.
        activation: activation in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_ac'` for the activation
            and `name + '_bn'` for the batch norm layer.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               padding=padding,
               use_bias=use_bias,
               name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = None if name is None else name + '_bn'
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = None if name is None else name + '_ac'
        x = Activation(activation, name=ac_name)(x)
    return x 
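A hypothetical chain of two such blocks (filter counts, strides, and names are illustrative; img_input is assumed to be an existing tensor):

# Illustrative usage of conv2d_bn (hypothetical names and values):
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid', name='stem1')
x = conv2d_bn(x, 64, 3, name='stem2')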
Example 36
Project: kaggle-carvana-2017   Author: killthekitten   File: resnet50_fixed.py    MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size,
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x 
Example 37
Project: AIGame   Author: chenghongkuan   File: RL_brain_Keras.py    GNU General Public License v3.0
def _build_net(self):
        # ------------------ Build the evaluation network ------------------
        # Here the network is only used to output the Q-value of each action;
        # the final decision is made by choosing among those Q-values, so the
        # network is effectively a plain input-to-output mapping rather than a
        # classifier with fixed class outputs.
        # As in the previous example, we build a small two-layer network.
        self.model_eval = Sequential([
            # Input layer; each neuron gets an activation function
            Dense(self.first_layer_neurno, input_dim=self.n_features, activation='relu'),
            # Activation('relu'),
            # Dense(1, activation='tanh'),
            # Output layer
            Dense(self.n_actions),
        ])
        # Use the RMSprop optimizer with the given learning rate
        rmsprop = RMSprop(lr=self.lr, rho=0.9, epsilon=1e-08, decay=0.0)
        self.model_eval.compile(loss='mse',
                            optimizer=rmsprop,
                            metrics=['accuracy'])

        # ------------------ Build the target network ------------------
        # The target network must have the same architecture as the evaluation
        # network, but it does not need a loss function.
        self.model_target = Sequential([
            # Input layer; each neuron gets an activation function
            Dense(self.first_layer_neurno, input_dim=self.n_features, activation='relu'),
            # Activation('relu'),
            # Dense(1, activation='tanh'),
            # Output layer
            Dense(self.n_actions),
        ]) 
Example 38
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def fcn_2d(input_shape):
    inputs = Input(input_shape)

    conv1 = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(inputs)
#    bn1 = BatchNormalization()(conv1)
    act1 = Activation('relu')(conv1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act1)

    conv2 = Conv2D(32, kernel_size=(6, 6), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(pool1)
#    bn2 = BatchNormalization()(conv2)
    act2 = Activation('relu')(conv2)
    pool2 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act2)

    conv3 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(pool2)
#    bn3 = BatchNormalization()(conv3)
    act3 = Activation('relu')(conv3)
    pool3 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act3)

    up1 = UpSampling2D(size=(3, 3))(pool3)
    deconv1 = Conv2DTranspose(32, 3)(up1)
    act4 = Activation('relu')(deconv1)

    up2 = UpSampling2D(size=(3, 3))(act4)
    deconv2 = Conv2DTranspose(16, 6)(up2)
    act5 = Activation('relu')(deconv2)

    up3 = UpSampling2D(size=(3, 3))(act5)
    deconv3 = Conv2DTranspose(nb_classes, 3)(up3)
    act6 = Activation('relu')(deconv3)
    deconv4 = Conv2DTranspose(nb_classes, 3)(act6)

    model = Model(inputs=inputs, outputs=deconv4)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss=softmax_sparse_crossentropy_ignoring_first_label,
                  optimizer=adam,
                  metrics=[sparse_accuracy])
    return model


# U-net model 
Example 39
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_keras.py    MIT License
def G_model():
    inputs = Input((100,))
    in_h = int(img_height / 16)
    in_w = int(img_width / 16)
    d_dim = 256
    base = 128
    x = Dense(in_h * in_w * d_dim, name='g_dense1',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(inputs)
    x = Reshape((in_h, in_w, d_dim), input_shape=(d_dim * in_h * in_w,))(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_dense1_bn')(x)
    # 1/8
    x = Conv2DTranspose(base*4, (5, 5), name='g_conv1', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv1_bn')(x)
    # 1/4
    x = Conv2DTranspose(base*2, (5, 5), name='g_conv2', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv2_bn')(x)
    # 1/2
    x = Conv2DTranspose(base, (5, 5), name='g_conv3', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv3_bn')(x)
    # 1/1
    x = Conv2DTranspose(channel, (5, 5), name='g_out', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02),  bias_initializer=Constant())(x)
    x = Activation('tanh')(x)
    model = Model(inputs=inputs, outputs=x, name='G')
    return model 
Example 40
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nearest_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = MaxPooling2D((2,2), 2)(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.UpSampling2D(size=(2,2), interpolation='nearest')(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 41
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: semaseg_dataset_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(6):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 42
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: concat_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    enc1 = x

    x = MaxPooling2D((2,2), 2)(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)

    x = keras.layers.concatenate([x, enc1])
    x = Conv2D(32, (1, 1), padding='same', strides=1, name='concat_conv')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 43
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: semaseg_test_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(6):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 44
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: semaseg_loss_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(6):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 45
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bin_test_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(6):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = Conv2D(1, (1, 1), padding='same', strides=1, name='out', activation='sigmoid')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 46
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bin_loss_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(6):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = Conv2D(1, (1, 1), padding='same', strides=1, name='out', activation='sigmoid')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 47
Project: Keras-GAN   Author: eriklindernoren   File: context_encoder.py    MIT License
def build_generator(self):


        model = Sequential()

        # Encoder
        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Conv2D(512, kernel_size=1, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))

        # Decoder
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation('tanh'))

        model.summary()

        masked_img = Input(shape=self.img_shape)
        gen_missing = model(masked_img)

        return Model(masked_img, gen_missing) 
Example 48
Project: Keras-GAN   Author: eriklindernoren   File: pixelda.py    MIT License
def build_generator(self):
        """Resnet Generator"""

        def residual_block(layer_input):
            """Residual block described in paper"""
            d = Conv2D(64, kernel_size=3, strides=1, padding='same')(layer_input)
            d = BatchNormalization(momentum=0.8)(d)
            d = Activation('relu')(d)
            d = Conv2D(64, kernel_size=3, strides=1, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Add()([d, layer_input])
            return d

        # Image input
        img = Input(shape=self.img_shape)

        l1 = Conv2D(64, kernel_size=3, padding='same', activation='relu')(img)

        # Propagate the signal through the residual blocks
        r = residual_block(l1)
        for _ in range(self.residual_blocks - 1):
            r = residual_block(r)

        output_img = Conv2D(self.channels, kernel_size=3, padding='same', activation='tanh')(r)

        return Model(img, output_img) 
Example 49
Project: Keras-GAN   Author: eriklindernoren   File: acgan.py    MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img) 
Example 50
Project: Github-Classifier   Author: Ichaelus   File: Classifier.py    MIT License
def __init__(self, clf_type, input_size=None, output_size=None):
        # Check if clf_type is correct
        if clf_type is None or not clf_type.lower() in Classifier.classifier_types:
            raise TypeError("Enter correct classifier type")
        if clf_type.lower() == 'nn':
            if input_size is None or output_size is None:
                print("Enter input_size and output_size to create neural network")
                return None
            self.output_size = output_size
            
            model = Sequential()
            model.add(Dense(input_size, input_dim=(input_size)))
            model.add(Activation('tanh'))
            model.add(Dense(input_size *  2))
            model.add(Activation('tanh'))
            model.add(Dense(input_size * 2))
            model.add(Activation('tanh'))
            
            # Output Layer, one neuron per class
            model.add(Dense(output_size))
            # Softmax to normalize the values
            model.add(Activation('softmax'))

            model.compile(metrics=['accuracy'], optimizer='sgd', loss='mse')
            self.model = model
        elif clf_type.lower() == 'svm':
            self.model = SVC()
        elif clf_type.lower() == 'nb':
            self.model = MultinomialNB()
        elif clf_type.lower() == 'lr':
            self.model = LinearRegression() 
Example 51
Project: Github-Classifier   Author: Ichaelus   File: readme2w2vlstm.py    MIT License
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Readme Only Word2Vec LSTM", "A LSTM-Network reading the Readme word by word.\
        We used a Word2Vec-Model trained on Google-News articles, providing an embedding of 3 million different words.\
        This embedding comprises 300 dimensions. Each word is then fed into an LSTM-layer being followed by 3 Dense-layers.\
        Optimizer: Adam, loss: categorical crossentropy.")

        hidden_size = 300
        self.maxlen = 1000

        print "\tLoading word2vec Model"
        path= os.path.dirname(__file__) + "/../../Word2VecModel/"
        modelName = 'GoogleNews-vectors-negative300.bin'
        self.word2vecModel = Word2Vec.load_word2vec_format(path + modelName, binary=True)

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # At most self.maxlen words are allowed, each as a word2vec embedding vector.
        # return_sequences=True is required on every LSTM that feeds another LSTM;
        # without it, stacking fails on 2D input.
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, self.word2vecModel.vector_size),
                       return_sequences=num_hidden_layers > 0))

        for i in range(num_hidden_layers):
            model.add(LSTM(hidden_size, return_sequences=i < num_hidden_layers - 1))

        model.add(Dense(self.output_size))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                    optimizer=Adam(),
                    metrics=['accuracy'])

        self.model = model
        print "\t-", self.name 
Example 52
Project: Github-Classifier   Author: Ichaelus   File: reponamelstm.py    MIT License
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Repo-Name Only LSTM", "A LSTM reading the repository-name character by character.\
        The input is a matrix, each row standing for a character with each column representing a lowercase ASCII-Character, punctuation or a number.\
        Up to 30 characters are fed into this net.\
        The first hidden layer is the LSTM-layer, the next 3 are standard and fully connected. The outputs are normalized using a softmax function.\
        The loss function is categorical crossentropy and the learning-rate of the used Adam-optimizer was set to 0.0025.")

        hidden_size = 250
        self.maxlen = 30

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()

        # Up to self.maxlen characters allowed, each as a one-hot-encoded row
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))

        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))

        model.add(Dense(self.output_size))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                    optimizer=Adam(lr=0.0025),
                    metrics=['accuracy'])

        self.model = model
        print "\t-", self.name 
Example 53
Project: Github-Classifier   Author: Ichaelus   File: nnstacking.py    MIT License 5 votes vote down vote up
def __init__(self, subclassifiers):
        EnsembleClassifier.__init__(self, "Stacking NN", "This is our best and therefore final classifier.\
        The input to this shallow neural net comprises the predictions of various Support Vector Machines, Random Forests, Gradient Boosted Regression Trees and neural networks (LSTM and normal).\
        Most of them being trained on different feature-combinations.\
        Additionally we supply it with a hand selected subset of metadata for context.\
        For an in-depth explanation see the Documentation.\
        The input is followed by one hidden layer with a leaky (smoothed) ReLu-Activation.\
        Loss-function: categorical crossentropy, optimizer: Adam.") 

        self.subclassifiers = subclassifiers

        # Set input-size and output_size
        self.input_size = 7 * len(subclassifiers) + getReducedMetadataLength()
        self.output_size = 7 # Hardcoded for 7 classes
        self.hidden_size = self.input_size

        # Create model
        model = Sequential()
        # Add input-layer
        model.add(Dense(self.hidden_size, input_dim=self.input_size, init='uniform'))

        # Add hidden layer
        model.add(LeakyReLU())
        # Add output layer and normalize probabilities with softmax
        model.add(Dense(self.output_size, init='uniform'))
        model.add(Activation('softmax'))

        # Compile model and use Adam as optimizer
        model.compile(metrics=['accuracy'], loss='categorical_crossentropy', optimizer=Adam())

        self.model = model
        print "\t-", self.name 
Example 54
Project: Github-Classifier   Author: Ichaelus   File: nndescriptiononly.py    MIT License 5 votes vote down vote up
def __init__(self, text_corpus, num_hidden_layers=1):
        ClassificationModule.__init__(self, "Description only NN", "A basic feedforward neural network")
        # Create vectorizer and fit on all available Descriptions
        self.vectorizer = getTextVectorizer(3000) # Maximum of different columns
        corpus = []
        for description in text_corpus:
            corpus.append(process_text(description))
        self.vectorizer.fit(corpus)

        # Set input-size and output_size
        self.input_size = len(self.vectorizer.get_feature_names())
        self.output_size = 7 # Hardcoded for 7 classes

        # Create model
        model = Sequential()
        # Add input-layer
        model.add(Dense(self.input_size, input_dim=self.input_size, init='uniform'))
        model.add(Activation('relu'))

        # Add hidden layers
        for _ in xrange(num_hidden_layers):
            model.add(Dense(self.input_size, init='uniform'))
            model.add(Activation('relu'))
        
        # Add output layer and normalize probabilities with softmax
        model.add(Dense(self.output_size, init='uniform'))
        model.add(Activation('softmax'))

        # Compile model and use Adam as optimizer
        model.compile(metrics=['accuracy'], loss='categorical_crossentropy', optimizer=Adam())

        self.model = model
        print "\t-", self.name 
Example 55
Project: Github-Classifier   Author: Ichaelus   File: nnmetaonly.py    MIT License 5 votes vote down vote up
def __init__(self, num_hidden_layers=1):
        ClassificationModule.__init__(self, "Meta Only NN", "A feedforward neural network which uses metadata (numerical features + used programming languages)")

        # Set input-size and output_size
        self.input_size = getMetadataLength()
        self.output_size = 7 # Hardcoded for 7 classes

        # Create model
        model = Sequential()
        # Add input-layer
        model.add(Dense(self.input_size, input_dim=self.input_size, init='uniform'))
        model.add(Activation('relu'))

        # Add hidden layers
        for _ in xrange(num_hidden_layers):
            model.add(Dense(self.input_size, init='uniform'))
            model.add(Activation('relu'))
        
        # Add output layer and normalize probabilities with softmax
        model.add(Dense(self.output_size, init='uniform'))
        model.add(Activation('softmax'))

        # Compile model and use Adam as optimizer
        model.compile(metrics=['accuracy'], loss='categorical_crossentropy', optimizer=Adam())

        self.model = model
        print "\t-", self.name 
Example 56
Project: Kickstart-AI   Author: katchu11   File: generative-adversarial-network.py    MIT License 5 votes vote down vote up
def generator_builder(z_dim=100, depth=64, p=0.4):

    inputs = Input((z_dim,))

    dense1 = Dense(7*7*64)(inputs)
    dense1 = BatchNormalization(momentum=0.9)(dense1)
    dense1 = Activation(activation='relu')(dense1)
    dense1 = Reshape((7,7,64))(dense1)
    dense1 = Dropout(p)(dense1)

    conv1 = UpSampling2D()(dense1)
    conv1 = Conv2DTranspose(int(depth/2), kernel_size=5, padding='same', activation=None,)(conv1)
    conv1 = BatchNormalization(momentum=0.9)(conv1)
    conv1 = Activation(activation='relu')(conv1)

    conv2 = UpSampling2D()(conv1)
    conv2 = Conv2DTranspose(int(depth/4), kernel_size=5, padding='same', activation=None,)(conv2)
    conv2 = BatchNormalization(momentum=0.9)(conv2)
    conv2 = Activation(activation='relu')(conv2)

    conv3 = Conv2DTranspose(int(depth/8), kernel_size=5, padding='same', activation=None,)(conv2)
    conv3 = BatchNormalization(momentum=0.9)(conv3)
    conv3 = Activation(activation='relu')(conv3)

    output = Conv2D(1, kernel_size=5, padding='same', activation='sigmoid')(conv3)

    model = Model(inputs=inputs, outputs=output)
    model.summary()

    return model


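A usage sketch for the generator above (not from the project); the latent distribution is an assumption:

import numpy as np

generator = generator_builder()
z = np.random.normal(0.0, 1.0, size=(16, 100))  # 16 latent vectors, scale assumed
fake_images = generator.predict(z)              # -> (16, 28, 28, 1), values in (0, 1)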
Example 57
Project: keras_mixnets   Author: titu1994   File: mixnets.py    MIT License 4 votes vote down vote up
def SEBlock(input_filters, se_ratio, expand_ratio, activation_fn, data_format=None):
    if data_format is None:
        data_format = K.image_data_format()

    num_reduced_filters = max(
        1, int(input_filters * se_ratio))
    filters = input_filters * expand_ratio

    if data_format == 'channels_first':
        channel_axis = 1
        spatial_dims = [2, 3]
    else:
        channel_axis = -1
        spatial_dims = [1, 2]

    def block(inputs):
        x = inputs
        x = layers.Lambda(lambda a: K.mean(a, axis=spatial_dims, keepdims=True))(x)
        x = GroupedConv2D(
            num_reduced_filters,
            kernel_size=[1],
            strides=[1, 1],
            kernel_initializer=MixNetConvInitializer(),
            padding='same',
            use_bias=True)(x)

        x = activation_fn()(x)

        # Excite
        x = GroupedConv2D(
            filters,
            kernel_size=[1],
            strides=[1, 1],
            kernel_initializer=MixNetConvInitializer(),
            padding='same',
            use_bias=True)(x)
        x = layers.Activation('sigmoid')(x)
        out = layers.Multiply()([x, inputs])
        return out

    return block


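A usage sketch for SEBlock (not from the project): with expand_ratio=1 the excite stage restores the input channel count, so the sigmoid gate can be multiplied back onto the feature map. The input shape and activation_fn choice are illustrative, and GroupedConv2D / MixNetConvInitializer from the same file must be in scope.

from keras import layers
from keras.models import Model

inputs = layers.Input(shape=(32, 32, 48))
se = SEBlock(input_filters=48, se_ratio=0.25, expand_ratio=1,
             activation_fn=lambda: layers.Activation('relu'))
outputs = se(inputs)             # same shape as inputs, channels rescaled by the gate
Model(inputs, outputs).summary()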
Example 58
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo3.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    #img_input = Input(shape=input_shape)

    model = Sequential()

    model.add(Convolution2D(32, 8, 8,
                            input_shape=input_shape, init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 6, 6, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 4, 4, init=weight_init, name='conv1_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 2, 2, init=weight_init, name='conv1_4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 208, out 104
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 8, 8, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 6, 6, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 2, 2, init=weight_init, name='conv2_4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 88, out is 44
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(220, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
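
The foo() variants in this and the following examples rely on module-level weight_init and dropout globals that the excerpts do not show. A hedged compile-and-inspect sketch with assumed values (the loss and optimizer settings are illustrative, not the project's):

from keras.optimizers import SGD

weight_init = 'he_normal'  # assumed
dropout = 0.5              # assumed

model = foo()
model.compile(loss='binary_crossentropy',
              optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
              metrics=['accuracy'])
model.summary()  # expects Theano-ordered (1, 224, 224) input batches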
Example 59
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    #img_input = Input(shape=input_shape)

    model = Sequential()

    model.add(Convolution2D(32, 3, 3,
                            input_shape=input_shape, init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same', name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same', name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 60
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    #img_input = Input(shape=input_shape)

    model = Sequential()

    model.add(Convolution2D(32, 5, 5,
                            input_shape=input_shape, init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv1_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 116, out 58
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 52, out is 26
    model.add(Dropout(dropout))

    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 20, out is 10
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(10, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 61
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_two.py    MIT License 4 votes vote down vote up
def foo():

    # Determine proper input shape
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    #img_input = Input(shape=input_shape)

    model = Sequential()

    model.add(Convolution2D(16, 3, 3,
                            input_shape=input_shape, init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(16, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(128, 3, 3, init=weight_init, border_mode='same', name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, border_mode='same', name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 63
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
              channels=1, nb_filters=64, nb_classes=10):
    """
    Defines a CNN model using Keras sequential model
    :param logits: If set to False, returns a Keras model, otherwise will also
                    return logits tensor
    :param input_ph: The TensorFlow tensor for the input
                    (needed if returning logits)
                    ("ph" stands for placeholder but it need not actually be a
                    placeholder)
    :param img_rows: number of rows in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per layer
    :param nb_classes: the number of output classes
    :return: a Keras Sequential model, plus the logits tensor if requested
    """
    model = Sequential()

    # Define the layers successively (convolution layers are version dependent)
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (channels, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, channels)

    layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
                      input_shape=input_shape),
              Activation('relu'),
              conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
              Activation('relu'),
              Flatten(),
              Dense(nb_classes)]

    for layer in layers:
        model.add(layer)

    if logits:
        logits_tensor = model(input_ph)
    model.add(Activation('softmax'))

    if logits:
        return model, logits_tensor
    else:
        return model 
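
A usage sketch for the logits=True path (not from the project), in the TF1 graph style the docstring implies; a TensorFlow backend and channels-last ordering are assumed:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
model, logits = cnn_model(logits=True, input_ph=x)
probs = model(x)  # post-softmax probabilities for the same input tensor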
Example 64
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def generate_model():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))

    x = Masking()(ip)
    x = LSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
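
Both generators call a squeeze_excite_block helper that this excerpt omits. In the LSTM-FCN family of models this code follows, that helper is a standard 1D squeeze-and-excitation block; a sketch under that assumption (the reduction ratio of 16 is a guess):

from keras import backend as K
from keras.layers import GlobalAveragePooling1D, Reshape, Dense, multiply

def squeeze_excite_block(input_tensor):
    # Global pool -> bottleneck -> sigmoid channel gate, multiplied back in.
    filters = K.int_shape(input_tensor)[-1]
    se = GlobalAveragePooling1D()(input_tensor)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)
    return multiply([input_tensor, se])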
Example 65
Project: smach_based_introspection_framework   Author: birlrobotics   File: anomaly_model_generation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 10

    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    #ip1 = K.reshape(ip,shape=(MAX_TIMESTEPS,MAX_NB_VARIABLES))
    #x = Permute((2, 1))(ip)
    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model 
Example 66
Project: swishnet   Author: i7p9h9   File: swishnet.py    MIT License 4 votes vote down vote up
def SwishNet(input_shape, classes, width_multiply=1):
    _x_in = layers.Input(shape=input_shape)

    # 1 block
    _x_up = __causal_gated_conv1D(filters=16 * width_multiply, length=3)(_x_in)
    _x_down = __causal_gated_conv1D(filters=16 * width_multiply, length=6)(_x_in)
    _x = layers.Concatenate()([_x_up, _x_down])

    # 2 block
    _x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)
    _x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)
    _x = layers.Concatenate()([_x_up, _x_down])

    # 3 block
    _x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)
    _x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)
    _x_concat = layers.Concatenate()([_x_up, _x_down])

    _x = layers.Add()([_x, _x_concat])

    # 4 block
    _x_loop1 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=3)(_x)
    _x = layers.Add()([_x, _x_loop1])

    # 5 block
    _x_loop2 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)
    _x = layers.Add()([_x, _x_loop2])

    # 6 block
    _x_loop3 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)
    _x = layers.Add()([_x, _x_loop3])

    # 7 block
    _x_forward = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)

    # 8 block
    _x_loop4 = __causal_gated_conv1D(filters=32 * width_multiply, length=3, strides=2)(_x)

    # output
    _x = layers.Concatenate()([_x_loop2, _x_loop3, _x_forward, _x_loop4])
    _x = layers.Conv1D(filters=classes, kernel_size=1)(_x)
    _x = layers.GlobalAveragePooling1D()(_x)
    _x = layers.Activation("softmax")(_x)

    model = models.Model(inputs=_x_in, outputs=_x)

    return model 
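
__causal_gated_conv1D is referenced but not shown. Gated causal convolutions are commonly built as a tanh branch multiplied by a sigmoid gate; a sketch under that assumption (this is not the project's implementation). The strides argument is treated as a dilation rate so the time axis keeps its length, which the Add() residual connections above require:

from keras import layers

def causal_gated_conv1D(filters=16, length=3, strides=1):
    def block(x):
        f = layers.Conv1D(filters, length, dilation_rate=strides,
                          padding='causal', activation='tanh')(x)
        g = layers.Conv1D(filters, length, dilation_rate=strides,
                          padding='causal', activation='sigmoid')(x)
        return layers.Multiply()([f, g])  # gate the tanh features
    return block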
Example 67
Project: ndparse   Author: neurodata   File: nddl.py    Apache License 2.0 4 votes vote down vote up
def ciresan_n3(n=65, nOutput=2):
    """An approximation of the N3 network from [1].
    Note that we also made a few small modifications along the way
    (from Theano to caffe and now to tensorflow/keras).

    As of this writing, no serious attempt has been made to optimize
    hyperparameters or structure of this network.

    Parameters:
       n : The tile size (diameter) to use in the sliding window.
           Tiles are assumed to be square, hence only one parameter.

    [1] Ciresan et al 'Deep neural networks segment neuronal membranes in
        electron microscopy images,' NIPS 2012.
    """

    from keras.optimizers import SGD
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization


    model = Sequential()

    # input: nxn images with 1 channel -> (1, n, n) tensors.
    # this applies 48 convolution filters of size 5x5 each.
    model.add(Convolution2D(48, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, n, n)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())  # note: we used LRN previously...

    model.add(Convolution2D(48, 5, 5))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())  # note: we used LRN previously...
    #model.add(Dropout(0.25))

    model.add(Convolution2D(48, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

    model.add(Flatten())
    # Note: Keras does automatic shape inference.
    model.add(Dense(200))
    model.add(Activation('relu'))
    #model.add(Dropout(0.5))

    model.add(Dense(nOutput))  # use 2 for binary classification
    model.add(Activation('softmax'))

    return model


#-------------------------------------------------------------------------------
#  Code for training a deep learning network
#------------------------------------------------------------------------------- 
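
A hedged training sketch for the network above; the optimizer settings and the random data are illustrative, not the project's:

import numpy as np
from keras.optimizers import SGD

model = ciresan_n3(n=65, nOutput=2)
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01, momentum=0.9),
              metrics=['accuracy'])

X = np.random.rand(32, 1, 65, 65).astype('float32')  # Theano-ordered tiles
Y = np.eye(2)[np.random.randint(0, 2, 32)]           # one-hot labels
model.fit(X, Y, batch_size=8, verbose=0)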
Example 68
Project: kaggle-carvana-2017   Author: killthekitten   File: mobile_net_fixed.py    MIT License 4 votes vote down vote up
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1), name="conv1"):
    """Adds an initial convolution layer (with batch normalization and relu6).

    # Arguments
        inputs: Input tensor of shape `(rows, cols, 3)`
            (with `channels_last` data format) or
            (3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
            Can be a single integer to specify the same value for
            all spatial dimensions.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.

    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = int(filters * alpha)
    x = Conv2D(filters, kernel,
               padding='same',
               use_bias=False,
               strides=strides,
               name='conv1')(inputs)
    x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return Activation(relu6, name='conv1_relu')(x) 
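
relu6 is referenced but not defined in this excerpt; in the MobileNet source it is simply ReLU capped at 6. A usage sketch with that definition (the input shape is illustrative):

from keras import backend as K
from keras.layers import Input
from keras.models import Model

def relu6(x):
    return K.relu(x, max_value=6)  # ReLU capped at 6, as in MobileNet

inputs = Input(shape=(224, 224, 3))
x = _conv_block(inputs, filters=32, alpha=1.0, strides=(2, 2))
Model(inputs, x).summary()  # -> (None, 112, 112, 32)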
Example 69
Project: kaggle-carvana-2017   Author: killthekitten   File: resnet50_fixed.py    MIT License 4 votes vote down vote up
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """A block that has a conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.

    Note that from stage 3, the first conv layer on the main path has strides=(2, 2),
    and the shortcut has strides=(2, 2) as well.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(filters3, (1, 1), strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x 
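
In the upstream Keras ResNet50 application this conv_block is paired with an identity_block whose shortcut has no convolution. The companion block is reproduced below for reference; it assumes the same imports as the snippet above:

def identity_block(input_tensor, kernel_size, filters, stage, block):
    filters1, filters2, filters3 = filters
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])  # identity shortcut: no conv, no BN
    x = Activation('relu')(x)
    return x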
Example 70
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 4 votes vote down vote up
def cnn_3d(input_shape):

    model = Sequential()
    model.add(Conv3D(16, kernel_size=(3, 3, 20), strides=(1, 1, 10), padding='valid', kernel_regularizer=l2(REG_lambda), input_shape=input_shape))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(16, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    model.add(Conv3D(64, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(64, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))

    model.add(Flatten())
    model.add(Dense(128))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model


Example 71
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res18_keras.py    MIT License 4 votes vote down vote up
def Res18():

    def ResBlock(x, in_f, out_f, stride=1, name="res"):
        res_x = Conv2D(out_f, [3, 3], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = ResBlock(x, 64, 64, name="res2_1")
    x = ResBlock(x, 64, 64, name="res2_2")

    x = ResBlock(x, 64, 128, stride=2, name="res3_1")
    x = ResBlock(x, 128, 128, name="res3_2")

    x = ResBlock(x, 128, 256, stride=2, name="res4_1")
    x = ResBlock(x, 256, 256, name="res4_2")

    x = ResBlock(x, 256, 512, stride=2, name="res5_1")
    x = ResBlock(x, 512, 512, name="res5_2")

    x = AveragePooling2D([7, 7], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model 
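
Res18() depends on module-level img_height, img_width, channel and num_classes globals. A hedged sketch with assumed values; the fixed 7x7 average pooling at the end requires a 224x224 input:

from keras.optimizers import SGD

img_height, img_width, channel = 224, 224, 3  # assumed
num_classes = 1000                            # assumed

model = Res18()
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.1, momentum=0.9),
              metrics=['accuracy'])
model.summary()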
Example 72
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res101_keras.py    MIT License 4 votes vote down vote up
def Res101():

    def ResBlock(x, in_f, f_1, out_f, stride=1, name="res"):
        res_x = Conv2D(f_1, [1, 1], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(f_1, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [1, 1], strides=1, padding='same', activation=None, name=name+"_conv3")(res_x)
        res_x = BatchNormalization(name=name+"_bn3")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = ResBlock(x, 64, 64, 256, name="res2_1")
    x = ResBlock(x, 256, 64, 256, name="res2_2")
    x = ResBlock(x, 256, 64, 256, name="res2_3")

    x = ResBlock(x, 256, 128, 512, stride=2, name="res3_1")
    x = ResBlock(x, 512, 128, 512, name="res3_2")
    x = ResBlock(x, 512, 128, 512, name="res3_3")
    x = ResBlock(x, 512, 128, 512, name="res3_4")

    x = ResBlock(x, 512, 256, 1024, stride=2, name="res4_1")
    for i in range(22):
        x = ResBlock(x, 1024, 256, 1024, name="res4_{}".format(i+2))

    x = ResBlock(x, 1024, 512, 2048, stride=2, name="res5_1")
    x = ResBlock(x, 2048, 256, 2048, name="res5_2")
    x = ResBlock(x, 2048, 256, 2048, name="res5_3")

    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model 
Example 73
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: resNeXt101_keras.py    MIT License 4 votes vote down vote up
def ResNeXt101():

    def Block(x, in_f, f_1, out_f, stride=1, cardinality=32):  # cardinality needs a default to be valid Python; 32 as in the ResNeXt paper
        res_x = Conv2D(f_1, [1, 1], strides=stride, padding='same', activation=None)(x)
        res_x = BatchNormalization()(res_x)
        res_x = Activation("relu")(res_x)

        multiplier = f_1 // cardinality
        res_x = SeparableConv2D(f_1, [3, 3], strides=1, padding='same', depth_multiplier=multiplier, activation=None)(res_x)
        res_x = BatchNormalization()(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [1, 1], strides=1, padding='same', activation=None)(res_x)
        res_x = BatchNormalization()(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None)(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = Block(x, 64, 64, 256)
    x = Block(x, 256, 64, 256)
    x = Block(x, 256, 64, 256)

    x = Block(x, 256, 128, 512, stride=2)
    x = Block(x, 512, 128, 512)
    x = Block(x, 512, 128, 512)
    x = Block(x, 512, 128, 512)

    x = Block(x, 512, 256, 1024, stride=2)
    for i in range(22):
        x = Block(x, 1024, 256, 1024)

    x = Block(x, 1024, 512, 2048, stride=2)
    x = Block(x, 2048, 256, 2048)
    x = Block(x, 2048, 256, 2048)

    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name='out')(x)

    model = Model(inputs=inputs, outputs=[x])

    return model 
Example 74
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res152_keras.py    MIT License 4 votes vote down vote up
def Res152():

    def ResBlock(x, in_f, f_1, out_f, stride=1, name="res"):
        res_x = Conv2D(f_1, [1, 1], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(f_1, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [1, 1], strides=1, padding='same', activation=None, name=name+"_conv3")(res_x)
        res_x = BatchNormalization(name=name+"_bn3")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    x = ResBlock(x, 64, 64, 256, name="res2_1")
    x = ResBlock(x, 256, 64, 256, name="res2_2")
    x = ResBlock(x, 256, 64, 256, name="res2_3")

    x = ResBlock(x, 256, 128, 512, stride=2, name="res3_1")
    for i in range(7):
        x = ResBlock(x, 512, 128, 512, name="res3_{}".format(i+2))

    x = ResBlock(x, 512, 256, 1024, stride=2, name="res4_1")
    for i in range(35):
        x = ResBlock(x, 1024, 256, 1024, name="res4_{}".format(i+2))

    x = ResBlock(x, 1024, 512, 2048, stride=2, name="res5_1")
    x = ResBlock(x, 2048, 256, 2048, name="res5_2")
    x = ResBlock(x, 2048, 256, 2048, name="res5_3")

    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model 
Example 75
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: cgan_cifar10_keras.py    MIT License 4 votes vote down vote up
def G_model():
    inputs = Input([100, ], name="x")
    con_x = Input([num_classes, ], name="con_x")
    con_x2 = Input([img_height, img_width, num_classes], name="con_x2")
    
    #con_x = K.zeros([None, num_classes, 1, 1])
    #print(con_x.shape)
    #con_x = np.zeros([len(_con_x), num_classes, 1, 1], dtype=np.float32)
    #con_x[np.arange(len(_con_x)), _con_x] = 1

    x = concatenate([inputs, con_x], axis=-1)
    
    in_h = int(img_height / 16)
    in_w = int(img_width / 16)
    d_dim = 256
    base = 128
    x = Dense(in_h * in_w * d_dim, name='g_dense1',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Reshape((in_h, in_w, d_dim), input_shape=(d_dim * in_h * in_w,))(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_dense1_bn')(x)
    # 1/8
    x = Conv2DTranspose(base*4, (5, 5), name='g_conv1', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv1_bn')(x)
    # 1/4
    x = Conv2DTranspose(base*2, (5, 5), name='g_conv2', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv2_bn')(x)
    # 1/2
    x = Conv2DTranspose(base, (5, 5), name='g_conv3', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv3_bn')(x)
    # 1/1
    x = Conv2DTranspose(channel, (5, 5), name='g_out', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02),  bias_initializer=Constant())(x)
    x = Activation('tanh')(x)

    #con_x = np.zerns([len(_con_x), num_classes, img_height, img_width], dtype=np.float32)
    #con_x[np.arange(len(_con_x)), _con_x] = 1
    x2 = concatenate([x, con_x2], axis=-1)

    model = Model(inputs=[inputs, con_x, con_x2], outputs=[x], name='G')
    gan_g_model = Model(inputs=[inputs, con_x, con_x2], outputs=[x2], name='GAN_G')
    
    return model, gan_g_model 
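
The commented-out lines hint at how the condition inputs are prepared outside the graph. A numpy sketch under that reading: con_x is the one-hot label vector and con_x2 is the same one-hot broadcast over every pixel.

import numpy as np

def make_condition_inputs(labels, num_classes, img_height, img_width):
    # One-hot labels plus per-pixel one-hot maps, matching the condition inputs above.
    n = len(labels)
    con_x = np.zeros((n, num_classes), dtype=np.float32)
    con_x[np.arange(n), labels] = 1.0
    con_x2 = np.tile(con_x[:, None, None, :], (1, img_height, img_width, 1))
    return con_x, con_x2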
Example 76
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: cgan_mnist_keras.py    MIT License 4 votes vote down vote up
def G_model():
    inputs = Input([100, ], name="x")
    con_x = Input([num_classes, ], name="con_x")
    con_x2 = Input([img_height, img_width, num_classes], name="con_x2")
    
    #con_x = K.zeros([None, num_classes, 1, 1])
    #print(con_x.shape)
    #con_x = np.zeros([len(_con_x), num_classes, 1, 1], dtype=np.float32)
    #con_x[np.arange(len(_con_x)), _con_x] = 1

    x = concatenate([inputs, con_x], axis=-1)
    
    in_h = int(img_height / 4)
    in_w = int(img_width / 4)
    d_dim = 256
    base = 128
    x = Dense(in_h * in_w * d_dim, name='g_dense1',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Reshape((in_h, in_w, d_dim), input_shape=(d_dim * in_h * in_w,))(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_dense1_bn')(x)
    # 1/8
    #x = Conv2DTranspose(base*4, (5, 5), name='g_conv1', padding='same', strides=(2,2),
    #    kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    #x = Activation('relu')(x)
    #x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv1_bn')(x)
    # 1/4
    #x = Conv2DTranspose(base*2, (5, 5), name='g_conv2', padding='same', strides=(2,2),
    #    kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    #x = Activation('relu')(x)
    #x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv2_bn')(x)
    # 1/2
    x = Conv2DTranspose(base, (5, 5), name='g_conv3', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv3_bn')(x)
    # 1/1
    x = Conv2DTranspose(channel, (5, 5), name='g_out', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02),  bias_initializer=Constant())(x)
    x = Activation('tanh')(x)

    #con_x = np.zerns([len(_con_x), num_classes, img_height, img_width], dtype=np.float32)
    #con_x[np.arange(len(_con_x)), _con_x] = 1
    x2 = concatenate([x, con_x2], axis=-1)

    model = Model(inputs=[inputs, con_x], outputs=[x], name='G')
    gan_g_model = Model(inputs=[inputs, con_x, con_x2], outputs=[x2], name='GAN_G')
    
    return model, gan_g_model 
Example 77
Project: Keras-GAN   Author: eriklindernoren   File: srgan.py    MIT License 4 votes vote down vote up
def build_generator(self):

        def residual_block(layer_input, filters):
            """Residual block described in paper"""
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
            d = Activation('relu')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Add()([d, layer_input])
            return d

        def deconv2d(layer_input):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
            u = Activation('relu')(u)
            return u

        # Low resolution image input
        img_lr = Input(shape=self.lr_shape)

        # Pre-residual block
        c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
        c1 = Activation('relu')(c1)

        # Propagate through residual blocks
        r = residual_block(c1, self.gf)
        for _ in range(self.n_residual_blocks - 1):
            r = residual_block(r, self.gf)

        # Post-residual block
        c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
        c2 = BatchNormalization(momentum=0.8)(c2)
        c2 = Add()([c2, c1])

        # Upsampling
        u1 = deconv2d(c2)
        u2 = deconv2d(u1)

        # Generate high resolution output
        gen_hr = Conv2D(self.channels, kernel_size=9, strides=1, padding='same', activation='tanh')(u2)

        return Model(img_lr, gen_hr) 
Example 78
Project: Github-Classifier   Author: Ichaelus   File: nnall.py    MIT License 4 votes vote down vote up
def __init__(self, text_corpus, filetype_corpus, filename_corpus, foldername_corpus, num_hidden_layers=1):
        ClassificationModule.__init__(self, "All NN", "A basic feedforward neural network with 3 hidden layers.\
        The used activation function is LeakyReLU. Trained with Adam-optimizer.\
        Features are metadata and all text-features except the repository-name.\
        The readme and description are both encoded by the same Tfidf-Vectorizer with a vocabulary of 7000 words.\
        Also the filetypes are encoded by such vectorizer, allowing encoding of 30 distinct filetypes.\
        The vectorizer for foldernames and filenames both distinguish 150 different words.")
        
        self.vectorizer = getTextVectorizer(7000) # Maximum of different columns
        self.filetypeVectorizer = getTextVectorizer(30) 
        self.foldernameVectorizer = getTextVectorizer(150)
        self.filenameVectorizer = getTextVectorizer(150) 

        # Vectorizer for descriptions and/or readmes
        corpus = []
        for text in text_corpus:
            corpus.append(process_text(text))
        self.vectorizer.fit(corpus)

        # Vectorizer for filetypes
        corpus = []
        for type in filetype_corpus:
            corpus.append(type)
        self.filetypeVectorizer.fit(corpus)

        # Vectorizer for filenames
        corpus = []
        for type in filename_corpus:
            corpus.append(type)
        self.filenameVectorizer.fit(corpus)

        # Vectorizer for foldernames
        corpus = []
        for folder in foldername_corpus:
            corpus.append(folder)
        self.foldernameVectorizer.fit(corpus)
        
        # Set input-size and output_size
        self.input_size = len(self.vectorizer.get_feature_names()) + getMetadataLength() + len(self.filetypeVectorizer.get_feature_names()) + len(self.foldernameVectorizer.get_feature_names()) + len(self.filenameVectorizer.get_feature_names())
        self.output_size = 7 # Hardcoded for 7 classes

        # Create model
        model = Sequential()
        # Add input-layer
        model.add(Dense(self.input_size, input_dim=self.input_size, init='uniform'))
        model.add(LeakyReLU())

        # Add hidden layers
        for _ in xrange(num_hidden_layers):
            model.add(Dense(self.input_size, init='uniform'))
            model.add(LeakyReLU())
        
        # Add output layer and normalize probabilities with softmax
        model.add(Dense(self.output_size, init='uniform'))
        model.add(Activation('softmax'))

        # Compile model and use Adam as optimizer
        model.compile(metrics=['accuracy'], loss='categorical_crossentropy', optimizer=Adam())

        self.model = model
        print "\t-", self.name 
Example 79
Project: ismir2018-artist   Author: jongpillee   File: model.py    MIT License 4 votes vote down vote up
def model_basic(num_frame,num_sing):
	pos_anchor = Input(shape = (num_frame,128))

	# item model **audio**
	conv1 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn1 = BatchNormalization()
	activ1 = Activation('relu')
	MP1 = MaxPool1D(pool_size=4)
	conv2 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn2 = BatchNormalization()
	activ2 = Activation('relu')
	MP2 = MaxPool1D(pool_size=4)
	conv3 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn3 = BatchNormalization()
	activ3 = Activation('relu')
	MP3 = MaxPool1D(pool_size=4)
	conv4 = Conv1D(128,2,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn4 = BatchNormalization()
	activ4 = Activation('relu')
	MP4 = MaxPool1D(pool_size=2)
	conv5 = Conv1D(256,1,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn5 = BatchNormalization()
	activ5 = Activation('relu')
	drop1 = Dropout(0.5)

	item_sem = GlobalAvgPool1D()
	
	# pos anchor
	pos_anchor_conv1 = conv1(pos_anchor)
	pos_anchor_bn1 = bn1(pos_anchor_conv1)
	pos_anchor_activ1 = activ1(pos_anchor_bn1)
	pos_anchor_MP1 = MP1(pos_anchor_activ1)
	pos_anchor_conv2 = conv2(pos_anchor_MP1)
	pos_anchor_bn2 = bn2(pos_anchor_conv2)
	pos_anchor_activ2 = activ2(pos_anchor_bn2)
	pos_anchor_MP2 = MP2(pos_anchor_activ2)
	pos_anchor_conv3 = conv3(pos_anchor_MP2)
	pos_anchor_bn3 = bn3(pos_anchor_conv3)
	pos_anchor_activ3 = activ3(pos_anchor_bn3)
	pos_anchor_MP3 = MP3(pos_anchor_activ3)
	pos_anchor_conv4 = conv4(pos_anchor_MP3)
	pos_anchor_bn4 = bn4(pos_anchor_conv4)
	pos_anchor_activ4 = activ4(pos_anchor_bn4)
	pos_anchor_MP4 = MP4(pos_anchor_activ4)
	pos_anchor_conv5 = conv5(pos_anchor_MP4)
	pos_anchor_bn5 = bn5(pos_anchor_conv5)
	pos_anchor_activ5 = activ5(pos_anchor_bn5)
	pos_anchor_sem = item_sem(pos_anchor_activ5)

	output = Dense(num_sing, activation='softmax')(pos_anchor_sem)
	model = Model(inputs = pos_anchor, outputs = output)
	return model 
Example 80
Project: dsl-char-cnn   Author: boknilev   File: cnn_multifilter_cv.py    MIT License 4 votes vote down vote up
def make_model(maxlen, alphabet_size, embedding_dims, embedding_dropout,
               nb_filters, filter_lengths, hidden_dims, fc_dropout,
               num_classes):
    print('Build model...')
    main_input = Input(shape=(maxlen,))
    
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    embedding_layer = Embedding(alphabet_size,
                        embedding_dims,
                        input_length=maxlen,
                        dropout=embedding_dropout)
    embedded = embedding_layer(main_input)
    
    # we add a Convolution1D for each filter length, which will learn nb_filters[i]
    # word group filters of size filter_lengths[i]:
    convs = []
    for i in xrange(len(nb_filters)):
        conv_layer = Convolution1D(nb_filter=nb_filters[i],
                            filter_length=filter_lengths[i],
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1)
        conv_out = conv_layer(embedded)
        # we use max pooling:
        conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
        # We flatten the output of the conv layer,
        # so that we can concat all conv outputs and add a vanilla dense layer:
        conv_out = Flatten()(conv_out)
        convs.append(conv_out)
    
    # concat all conv outputs
    x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
    #concat = BatchNormalization()(concat)
    
    # We add a vanilla hidden layer:
    x = Dense(hidden_dims)(x)
    x = Dropout(fc_dropout)(x)
    x = Activation('relu')(x)
    
    # We project onto number of classes output layer, and squash it with a softmax:
    main_output = Dense(num_classes, activation='softmax')(x)
    
    # finally, define the model 
    model = Model(input=main_input, output=main_output)
    model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
    return model
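
A usage sketch for make_model (not from the project): characters are mapped to integer indices and padded to maxlen before reaching the embedding layer. The alphabet and all hyperparameters below are illustrative.

from keras.preprocessing.sequence import pad_sequences

alphabet = "abcdefghijklmnopqrstuvwxyz 0123456789"  # assumed; index 0 is reserved for padding/unknown
char_to_idx = {c: i + 1 for i, c in enumerate(alphabet)}

def encode(texts, maxlen):
    seqs = [[char_to_idx.get(c, 0) for c in t.lower()] for t in texts]
    return pad_sequences(seqs, maxlen=maxlen)

model = make_model(maxlen=200, alphabet_size=len(alphabet) + 2,
                   embedding_dims=32, embedding_dropout=0.2,
                   nb_filters=[64, 64], filter_lengths=[3, 5],
                   hidden_dims=128, fc_dropout=0.5, num_classes=5)
X = encode(["first sample text", "another sample"], maxlen=200)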