Python keras.activations.relu() Examples

The following are code examples for showing how to use keras.activations.relu(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: Jtyoui   Author: jtyoui   File: NER.py    MIT License 6 votes vote down vote up
def train_model():
    """Build and train a BiLSTM(+CRF) NER model, then save it to model_path.

    Relies on module-level configuration (cxl_model, train_path, vocab, tag,
    length, model_path) and helpers (load_embedding, vocab_train_label)
    defined elsewhere in this file.
    """
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; it was
    # only an alias for the builtin float, so use that directly.
    n = np.array(label, dtype=float)
    # CRF/TimeDistributed targets need shape (samples, timesteps, 1).
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
Example 2
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    MIT License 6 votes vote down vote up
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''

    from keras.activations import relu as r

    # Plain Python scalars go straight through; negatives clamp to zero.
    for value, expected in [(5, 5), (-5, 0), (-0.1, 0), (0.1, 0.1)]:
        assert r(value) == expected

    # Symbolic path: compile a theano function and evaluate it.
    vec = T.vector()
    f = theano.function([vec], r(vec))

    test_values = get_standard_values()
    result = f(test_values)

    # No negatives in the standard values, so relu acts as the identity.
    list_assert_equal(result, test_values)
Example 3
Project: Music_Generation   Author: llSourcell   File: simple-generative-model-regressor.py    GNU General Public License v3.0 6 votes vote down vote up
def wavenetBlock(n_atrous_filters, atrous_filter_size, atrous_rate,
                 n_conv_filters, conv_filter_size):
    """Return a closure applying one gated-activation WaveNet block.

    NOTE(review): n_conv_filters and conv_filter_size are accepted but
    never used by the closure.
    """
    def f(input_):
        shortcut = input_
        # Gated activation unit: tanh "filter" multiplied by sigmoid "gate".
        filt = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                   atrous_rate=atrous_rate,
                                   border_mode='same',
                                   activation='tanh')(input_)
        gate = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                   atrous_rate=atrous_rate,
                                   border_mode='same',
                                   activation='sigmoid')(input_)
        gated = merge([filt, gate], mode='mul')
        skip_out = Convolution1D(1, 1, activation='relu', border_mode='same')(gated)
        # Residual connection back to the block input.
        out = merge([skip_out, shortcut], mode='sum')
        return out, skip_out
    return f
Example 4
Project: Music_Generation   Author: llSourcell   File: simple-generative-model-regressor.py    GNU General Public License v3.0 6 votes vote down vote up
def get_basic_generative_model(input_size):
    """Build a small wavenet-style regressor over (1, input_size, 1) inputs.

    Chains five wavenetBlocks, sums their skip outputs, and regresses a
    single tanh-bounded scalar.  Uses the old Keras 1 functional API
    (merge, Model(input=..., output=...)).
    """
    # NOTE: `input` shadows the Python builtin of the same name.
    input = Input(shape=(1, input_size, 1))
    l1a, l1b = wavenetBlock(10, 5, 2, 1, 3)(input)
    l2a, l2b = wavenetBlock(1, 2, 4, 1, 3)(l1a)
    l3a, l3b = wavenetBlock(1, 2, 8, 1, 3)(l2a)
    l4a, l4b = wavenetBlock(1, 2, 16, 1, 3)(l3a)
    l5a, l5b = wavenetBlock(1, 2, 32, 1, 3)(l4a)
    # Sum all skip connections, then reduce to one scalar output.
    l6 = merge([l1b, l2b, l3b, l4b, l5b], mode='sum')
    l7 = Lambda(relu)(l6)
    l8 = Convolution2D(1, 1, 1, activation='relu')(l7)
    l9 = Convolution2D(1, 1, 1)(l8)
    l10 = Flatten()(l9)
    l11 = Dense(1, activation='tanh')(l10)
    model = Model(input=input, output=l11)
    model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
    model.summary()
    return model 
Example 5
Project: face-swap   Author: 5agado   File: gan.py    Apache License 2.0 6 votes vote down vote up
def conv(filters, kernel_size=5, strides=2, leaky_relu=False, batch_norm=False):
    """Conv block factory: Conv2D (+ optional BatchNorm) + LeakyReLU or ReLU.

    Returns a closure that applies the configured layers to a tensor.
    """
    def block(x):
        y = Conv2D(filters, kernel_size=kernel_size, strides=strides,
                   kernel_initializer=conv_init,
                   use_bias=False,
                   padding='same')(x)
        if batch_norm:
            y = BatchNormalization()(y)
        # Pick the non-linearity requested by the caller.
        y = LeakyReLU(0.2)(y) if leaky_relu else Activation("relu")(y)
        return y
    return block


# Standard feed-forward CNN with skip connections that bypass the convolution layers
# ref: http://torch.ch/blog/2016/02/04/resnets.html 
Example 6
Project: keras-wavenet   Author: usernaamee   File: simple-generative-model-regressor.py    GNU General Public License v3.0 6 votes vote down vote up
def wavenetBlock(n_atrous_filters, atrous_filter_size, atrous_rate,
                 n_conv_filters, conv_filter_size):
    """Return a closure that applies one gated-activation WaveNet block.

    NOTE: n_conv_filters and conv_filter_size are accepted but unused.
    """
    def f(input_):
        residual = input_
        # Gated activation: tanh "filter" multiplied by sigmoid "gate".
        tanh_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                       atrous_rate=atrous_rate,
                                       border_mode='same',
                                       activation='tanh')(input_)
        sigmoid_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                          atrous_rate=atrous_rate,
                                          border_mode='same',
                                          activation='sigmoid')(input_)
        merged = merge([tanh_out, sigmoid_out], mode='mul')
        skip_out = Convolution1D(1, 1, activation='relu', border_mode='same')(merged)
        # Residual connection: block output = skip + input.
        out = merge([skip_out, residual], mode='sum')
        return out, skip_out
    return f 
Example 7
Project: keras-wavenet   Author: usernaamee   File: simple-generative-model-regressor.py    GNU General Public License v3.0 6 votes vote down vote up
def get_basic_generative_model(input_size):
    """Stack five wavenet blocks, merge their skip outputs, and regress one
    tanh-bounded scalar (old Keras 1 functional API)."""
    net_in = Input(shape=(1, input_size, 1))
    res1, skip1 = wavenetBlock(10, 5, 2, 1, 3)(net_in)
    res2, skip2 = wavenetBlock(1, 2, 4, 1, 3)(res1)
    res3, skip3 = wavenetBlock(1, 2, 8, 1, 3)(res2)
    res4, skip4 = wavenetBlock(1, 2, 16, 1, 3)(res3)
    res5, skip5 = wavenetBlock(1, 2, 32, 1, 3)(res4)
    # Combine every skip connection, then post-process down to one value.
    x = merge([skip1, skip2, skip3, skip4, skip5], mode='sum')
    x = Lambda(relu)(x)
    x = Convolution2D(1, 1, 1, activation='relu')(x)
    x = Convolution2D(1, 1, 1)(x)
    x = Flatten()(x)
    x = Dense(1, activation='tanh')(x)
    model = Model(input=net_in, output=x)
    model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
    model.summary()
    return model
Example 8
Project: experiments   Author: Octavian-ai   File: adjacency_layer.py    MIT License 6 votes vote down vote up
def call_dot_softmax(self, x):
		"""Scale x by a learned product-person affinity mask.

		Softmaxes the product and person embeddings, forms their dot-product
		affinity matrix, applies an affine transform (w3, b3) and a leaky
		relu, and multiplies the result into x element-wise.
		"""
		# Fix: the original assigned pr/pe from the raw attributes and then
		# immediately overwrote them — the dead assignments are removed.
		pr = K.softmax(self.product)
		pe = K.softmax(self.person)

		m = K.dot(pr, K.transpose(pe))
		m = (self.w3 * m) + self.b3
		m = K.relu(m, alpha=0.1)  # leaky relu, negative slope 0.1

		m = m * x

		return m

	# 100pc test accuracy 
Example 9
Project: experiments   Author: Octavian-ai   File: adjacency_layer.py    MIT License 6 votes vote down vote up
def call_dense_conv(self, x):
		"""Mask x with a mask learned from all (product, person) embedding pairs.

		Jitters the first two weight slots, softmaxes both embedding tables,
		pairs every product with every person, and pushes the pairs through a
		tanh layer (wc1) then a leaky-relu layer (w2) to produce a
		(1, product_count, person_count) multiplicative mask.
		"""
		self.jitter(idx=[0,1])

		pr = self.product
		pe = self.person

		pr = K.softmax(pr)
		pe = K.softmax(pe)

		# assumes cartesian_product_matrix pairs each product row with each
		# person row along the style axis — TODO confirm against its impl.
		all_pairs = self.cartesian_product_matrix(pr, pe)

		# One row per (product, person, style) entry with the 2 paired values.
		flat = K.reshape(all_pairs, (self.product_count * self.person_count * self.style_width, 2))
		m = K.dot(flat, self.wc1)
		m = K.tanh(m)

		m = K.reshape(m, (self.product_count * self.person_count, self.style_width))
		m = K.dot(m, self.w2)
		m = K.relu(m, alpha=0.1)  # leaky relu, negative slope 0.1

		m = K.reshape(m, (1, self.product_count, self.person_count))
		masked = m * x
		return masked 
Example 10
Project: Deep-Segmentation   Author: ameya005   File: deconv.py    MIT License 6 votes vote down vote up
def __init__(self, model,  num_classes=2,r=5):
        """Wrap a base model and append conv + LogSumExp aggregation layers.

        Bug fixes vs. the original:
        - the passed-in `model` is now stored (self.model was read below but
          never assigned);
        - `arguments={'r':r,'num_classes'=num_classes}` was a SyntaxError —
          dict literals use ':', not '='.
        """
        self.model = model
        ##Freezing the initial convolutional layers
        for layer in self.model.layers:
            layer.trainable=False

        #Adding 4 convolutional layers as given in the paper : (May need to modify the #filters
        self.model.add(Conv2D(1024, 3, 3, activation='relu'))
        self.model.add(Conv2D(512,3,3,activation='relu'))
        self.model.add(Conv2D(256,3,3,activation='relu'))
        #The following layer outputs probability segmentation maps.
        self.model.add(Conv2D(num_classes,3,3,activation='linear'))
        #New layer to be added here for the softmax aggregation.
        #The Log Sum Exp layer follows the following aggregation rule:
        # s^k = (1/r) * log ( (1/(h_0*w_0) ) * \sigma(exp(r*(s_ij)^k)))
        # r = 5 for smoothness for now.
        self.model.add(Lambda(LogSumExp, output_shape=logsum_out,
                              arguments={'r':r,'num_classes':num_classes}))
Example 11
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateMNet_Block(input_t, channels, kernel_size=(3, 3), type=1, forwarding=True, l1_reg=0.0, l2_reg=1e-6):
    """One MNet block: `type` stacked Conv2D+ReLU layers, optionally
    concatenated with the block input along the channel axis.

    NOTE(review): the parameter `type` shadows the builtin; kept for
    signature compatibility.
    """
    def _conv(x):
        # Shared Conv2D configuration for every layer in this block.
        return Conv2D(channels,
                      kernel_size=kernel_size,
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='same',
                      strides=(1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(x)

    tower_t = Activation('relu')(_conv(input_t))
    for _ in range(1, type):
        tower_t = Activation('relu')(_conv(tower_t))
    if forwarding:
        tower_t = concatenate([tower_t, input_t], axis=1)
    return tower_t
Example 12
Project: CNNArt   Author: thomaskuestner   File: motion_MNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateMNet_Block(input_t, channels, kernel_size=(3,3), type=1, forwarding=True,l1_reg=0.0, l2_reg=1e-6 ):
    """One MNet block: `type` stacked Conv2D+ReLU layers, optionally
    concatenated with the block input along axis 1.

    NOTE: the parameter `type` shadows the builtin of the same name.
    """
    tower_t = Conv2D(channels,
                     kernel_size=kernel_size,
                     kernel_initializer='he_normal',
                     weights=None,
                     padding='same',
                     strides=(1, 1),
                     kernel_regularizer=l1_l2(l1_reg, l2_reg),
                     )(input_t)
    tower_t = Activation('relu')(tower_t)
    # Additional identical conv layers; runs type-1 times.
    for counter in range(1, type):
        tower_t = Conv2D(channels,
                         kernel_size=kernel_size,
                         kernel_initializer='he_normal',
                         weights=None,
                         padding='same',
                         strides=(1, 1),
                         kernel_regularizer=l1_l2(l1_reg, l2_reg),
                         )(tower_t)
        tower_t = Activation('relu')(tower_t)
    if (forwarding):
        # Forward the block input alongside the conv output (dense-style).
        tower_t = concatenate([tower_t, input_t], axis=1)
    return tower_t 
Example 13
Project: market-analysis-system   Author: terentjew-alexey   File: models.py    MIT License 6 votes vote down vote up
def simple_model(input_shape, nb_output, act='linear'):
    """Simple model for RL.

    Returns
        model (keras.Model): Model of neural network."""

    model = Sequential()
    model.add(BatchNormalization(batch_input_shape=(None, input_shape[0], input_shape[1])))
    # NOTE(review): LSTM width uses input_shape[2] while the input layer only
    # uses indices 0 and 1 — confirm input_shape always has three entries.
    model.add(LSTM(input_shape[1] * input_shape[2]))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    # Two shrinking dense blocks, each relu-activated with heavy dropout.
    for units in (64, 32):
        model.add(Dense(units))
        model.add(Activation('relu'))
        model.add(Dropout(0.4))
    model.add(Dense(nb_output, activation=act))

    return model
Example 14
Project: talos   Author: autonomio   File: params.py    MIT License 6 votes vote down vote up
def iris():
    """Return the Talos hyperparameter search space for the iris example."""

    from keras.optimizers import Adam, Nadam
    from keras.losses import logcosh, categorical_crossentropy
    from keras.activations import relu, elu, softmax

    # Tuples are (min, max, steps) ranges; lists are explicit choices.
    return dict(
        lr=(0.5, 5, 10),
        first_neuron=[4, 8, 16, 32, 64],
        hidden_layers=[0, 1, 2, 3, 4],
        batch_size=(2, 30, 10),
        epochs=[2],
        dropout=(0, 0.5, 5),
        weight_regulizer=[None],
        emb_output_dims=[None],
        shapes=['brick', 'triangle', 0.2],
        optimizer=[Adam, Nadam],
        losses=[logcosh, categorical_crossentropy],
        activation=[relu, elu],
        last_activation=[softmax],
    )
Example 15
Project: talos   Author: autonomio   File: params.py    MIT License 6 votes vote down vote up
def breast_cancer():
    """Return the Talos hyperparameter search space for the breast-cancer example."""

    from keras.optimizers import Adam, Nadam, RMSprop
    from keras.losses import logcosh, binary_crossentropy
    from keras.activations import relu, elu, sigmoid

    # then we can go ahead and set the parameter space
    # (tuples are (min, max, steps) ranges; lists are explicit choices)
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2],
         'batch_size': (2, 30, 10),
         'epochs': [50, 100, 150],
         'dropout': (0, 0.5, 5),
         'shapes': ['brick', 'triangle', 'funnel'],
         'optimizer': [Adam, Nadam, RMSprop],
         'losses': [logcosh, binary_crossentropy],
         'activation': [relu, elu],
         'last_activation': [sigmoid]}

    return p 
Example 16
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    MIT License 5 votes vote down vote up
def cnn_model():
    """Train a small CNN on MNIST and save it to ./model.h5."""
    (x_train, y_train), _ = mnist.load_data()
    # Normalize pixel values to [0, 1] and add the channel axis.
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # One-hot encode the digit labels.
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        # input_shape: input plane; set on the first layer only
        # filters: number of convolution kernels
        # kernel_size: size of each kernel
        # strides: stride
        # padding: either 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        # Fix: the original passed trainable=2 here — a typo for strides=2
        # (pooling layers take no trainable=... meaningfully; this mirrors
        # the first MaxPool2D above).
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten to a vector for the dense layers
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 17
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def dense_bn_relu(units=32, padding='same', name_to_concat='', input_tensor=None):
    """Dense -> BatchNorm -> ReLU, layer names suffixed with name_to_concat.

    NOTE(review): `padding` is accepted but unused — kept for signature
    compatibility with the sibling conv helpers.
    """
    out = Dense(units, kernel_initializer='he_normal',
                name=str('dense_' + name_to_concat))(input_tensor)
    out = BatchNormalization(name=str('dense_bn_' + name_to_concat))(out)
    return Activation('relu', name=str('dense_relu_' + name_to_concat))(out)
Example 18
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def conv1d_bn_relu(filter_size=3, filter_number=32, padding='same', name_to_concat='', input_tensor=None):
    """Conv1D -> BatchNorm -> ReLU; layer names are suffixed with name_to_concat."""
    tensor = Conv1D(filter_number, filter_size, padding=padding, name=str('conv1d_'+name_to_concat))(input_tensor)
    tensor = BatchNormalization(name=str('bn_'+name_to_concat))(tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor 
Example 19
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def conv2d_bn_relu(filter_size=3, filter_number=32, padding='same', name_to_concat='', input_tensor=None):
    """Conv2D -> BatchNorm -> ReLU; layer names are suffixed with name_to_concat."""
    tensor = Conv2D(filter_number, (filter_size, filter_size), padding=padding, name=str('conv2d_'+name_to_concat))(input_tensor)
    tensor = BatchNormalization(name=str('bn_'+name_to_concat))(tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor 
Example 20
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def conv2d_relu(filter_size=3, filter_number=16, padding='same', name_to_concat='', input_tensor=None):
    """Conv2D followed by ReLU; layer names are suffixed with name_to_concat."""
    conved = Conv2D(filter_number, (filter_size, filter_size), padding=padding,
                    name=str('conv2d_' + name_to_concat))(input_tensor)
    return Activation('relu', name=str('relu_' + name_to_concat))(conved)
Example 21
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def deconv2d_bn_relu(filter_size=4, filter_number=16, padding='valid', name_to_concat='', input_tensor=None):
    """2x-upsampling Conv2DTranspose -> BatchNorm -> ReLU; names suffixed with name_to_concat."""
    tensor = Conv2DTranspose(filter_number, (filter_size, filter_size), strides=(2, 2), padding=padding, name=str('deconv2d_'+name_to_concat))(input_tensor)
    tensor = BatchNormalization(name=str('bn_'+name_to_concat))(tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor

# In[5]: 
Example 22
Project: dep-gan-im   Author: febrianrachmadi   File: RLD441-depgan-twoCritics-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def conv1d_bn_relu(filter_size=3, filter_number=32, padding='same', name_to_concat='', input_tensor=None):
    """Conv1D -> BatchNorm -> ReLU; layer names are suffixed with name_to_concat."""
    tensor = Conv1D(filter_number, filter_size, padding=padding, name=str('conv1d_'+name_to_concat))(input_tensor)
    tensor = BatchNormalization(name=str('bn_'+name_to_concat))(tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor 
Example 23
Project: dep-gan-im   Author: febrianrachmadi   File: RLD441-depgan-twoCritics-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def conv2d_bn_relu(filter_size=3, filter_number=32, padding='same', name_to_concat='', input_tensor=None):
    """Conv2D -> BatchNorm -> ReLU; layer names are suffixed with name_to_concat."""
    tensor = Conv2D(filter_number, (filter_size, filter_size), padding=padding, name=str('conv2d_'+name_to_concat))(input_tensor)
    tensor = BatchNormalization(name=str('bn_'+name_to_concat))(tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor 
Example 24
Project: dep-gan-im   Author: febrianrachmadi   File: RLD441-depgan-twoCritics-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def conv2d_relu(filter_size=3, filter_number=16, padding='same', name_to_concat='', input_tensor=None):
    """Conv2D followed by ReLU; layer names are suffixed with name_to_concat."""
    tensor = Conv2D(filter_number, (filter_size, filter_size), padding=padding, name=str('conv2d_'+name_to_concat))(input_tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor 
Example 25
Project: dep-gan-im   Author: febrianrachmadi   File: RLD441-depgan-twoCritics-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def deconv2d_bn_relu(filter_size=4, filter_number=16, padding='valid', name_to_concat='', input_tensor=None):
    """2x-upsampling Conv2DTranspose -> BatchNorm -> ReLU; names suffixed with name_to_concat."""
    tensor = Conv2DTranspose(filter_number, (filter_size, filter_size), strides=(2, 2), padding=padding, name=str('deconv2d_'+name_to_concat))(input_tensor)
    tensor = BatchNormalization(name=str('bn_'+name_to_concat))(tensor)
    tensor = Activation('relu', name=str('relu_'+name_to_concat))(tensor)

    return tensor


# In[5]: 
Example 26
Project: TCFPN-ISBA   Author: Zephyr-D   File: tf_models.py    MIT License 5 votes vote down vote up
def temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len,
                          causal=False, loss='categorical_crossentropy',
                          optimizer='adam', return_param_str=False):
    """ Used in paper:
    Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation
    Lea et al. ECCV 2016

    Note: Spatial dropout was not used in the original paper.
    It tends to improve performance a little.
    """

    inputs = Input(shape=(max_len, n_feat))

    # Fix: the original did `model = ZeroPadding1D(...)(model)` before `model`
    # existed (UnboundLocalError when causal=True) and then convolved `inputs`
    # directly, discarding the padding.  Pad the input tensor itself instead.
    model = inputs
    if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
    model = Conv1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, padding='same',
                   activation='relu')(model)
    if causal: model = Cropping1D((0, conv_len // 2))(model)

    model = SpatialDropout1D(0.3)(model)

    # Per-timestep class distribution.
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal")

    if return_param_str:
        param_str = "tConv_C{}".format(conv_len)
        if causal:
            param_str += "_causal"

        return model, param_str
    else:
        return model 
Example 27
Project: Generative-Adversarial-Networks-Cookbook   Author: PacktPublishing   File: discriminator.py    MIT License 5 votes vote down vote up
def model(self):
        """Build the discriminator: strided conv downsampling ending in a
        (-1, 2) reshaped output.

        Uses the old Keras 1 conv API (subsample/border_mode).  Input shape
        comes from self.SHAPE.
        """
        input_layer = Input(shape=self.SHAPE)
        # Two stride-2 convs downsample, then pooling and 1x1 convs reduce
        # the representation to 2 channels.
        x = Convolution2D(96,3,3, subsample=(2,2), border_mode='same',activation='relu')(input_layer)
        x = Convolution2D(64,3,3, subsample=(2,2), border_mode='same',activation='relu')(x)
        x = MaxPooling2D(pool_size=(3,3),border_mode='same')(x)
        x = Convolution2D(32,3,3, subsample=(1,1), border_mode='same',activation='relu')(x)
        x = Convolution2D(32,1,1, subsample=(1,1), border_mode='same',activation='relu')(x)
        x = Convolution2D(2,1,1, subsample=(1,1), border_mode='same',activation='relu')(x)
        output_layer = Reshape((-1,2))(x)
        return Model(input_layer,output_layer) 
Example 28
Project: workspace_2017   Author: nwiizo   File: test_activations.py    MIT License 5 votes vote down vote up
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    inputs = get_standard_values()
    output = f([inputs])[0]

    # The standard values contain no negatives, so relu is the identity here.
    assert_allclose(output, inputs, rtol=1e-05)
Example 29
Project: applications   Author: geomstats   File: activations_test.py    MIT License 5 votes vote down vote up
def test_serialization():
    """Every named activation must round-trip through serialize/deserialize."""
    names = ('softmax', 'relu', 'elu', 'tanh',
             'sigmoid', 'hard_sigmoid', 'linear',
             'softplus', 'softsign', 'selu')
    for name in names:
        ref_fn = getattr(activations, name)
        # Lookup by name resolves to the canonical function...
        fn = activations.get(name)
        assert fn == ref_fn
        # ...and serialize -> deserialize gives the same function back.
        round_tripped = activations.deserialize(activations.serialize(fn))
        assert round_tripped == ref_fn
Example 30
Project: applications   Author: geomstats   File: activations_test.py    MIT License 5 votes vote down vote up
def test_relu():
    """relu on the (all non-negative) standard values is the identity."""
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    test_values = get_standard_values()
    result = f([test_values])[0]
    # No negatives in the fixture, so relu passes every value through.
    assert_allclose(result, test_values, rtol=1e-05) 
Example 31
Project: experiments   Author: Octavian-ai   File: adjacency_layer.py    MIT License 5 votes vote down vote up
def __init__(self, person_count, product_count, style_width, **kwargs):
		"""Store the layer dimensions and build the dense sub-layers.

		dense1: softplus projection to style_width units, weight-clipped.
		dense3: single leaky-relu (alpha=0.1) unit, weight-clipped.
		(dense2 is intentionally disabled below.)
		"""
		self.person_count = person_count
		self.product_count = product_count
		self.style_width = style_width
		self.dense1 = layers.Dense(units=(style_width), activation=activations.softplus, use_bias=False, kernel_regularizer=Clip)
		#self.dense2 = layers.(units=(1), activation=activations.linear)
		self.dense3 = layers.Dense(units=1, activation=partial(activations.relu, alpha=0.1), use_bias=False, kernel_regularizer=Clip)
		super(Adjacency, self).__init__(**kwargs) 
Example 32
Project: experiments   Author: Octavian-ai   File: adjacency_layer.py    MIT License 5 votes vote down vote up
def call_dense(self, x):
		"""Mask x with a mask computed densely from paired embeddings.

		Jitters the first two weight slots, softmaxes the product/person
		tables, concatenates every (product, person) embedding pair, and runs
		the pairs through two leaky-relu dense layers (w1, w2, dropout in
		between) to form a (1, product_count, person_count) mask.
		"""
		self.jitter(idx=[0,1], var=0.1)

		pr = self.product
		pe = self.person

		pr = K.softmax(pr)
		pe = K.softmax(pe)

		all_pairs = self.cartesian_product_matrix(pr, pe)
		# One row per (product, person) pair, both style vectors concatenated.
		flat = K.reshape(all_pairs, (self.product_count * self.person_count, self.style_width * 2))

		m = K.dot(flat, self.w1)
		# m = K.bias_add(m, self.b1)
		m = K.relu(m, alpha=0.1)  # leaky relu, negative slope 0.1

		m = K.dropout(m, level=0.1)

		m = K.dot(m, self.w2)
		m = K.relu(m, alpha=0.1)

		m = K.reshape(m, (1, self.product_count, self.person_count))
		masked = m * x
		return masked



	# 100pc test accuracy 
Example 33
Project: CNNArt   Author: thomaskuestner   File: CNN3DmoreLayers.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t, iPReLU=0):
    """Apply the selected activation: PReLU per-channel (iPReLU=1),
    PReLU per-layer (iPReLU=2), or plain ReLU otherwise."""
    init = 0.25
    if iPReLU == 1:
        # One learnable alpha per channel (shared across spatial axes only).
        return PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    if iPReLU == 2:
        # A single alpha shared across the entire layer.
        return PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    return Activation('relu')(input_t)
Example 34
Project: CNNArt   Author: thomaskuestner   File: CNN3D.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t, iPReLU=0):
    """Apply PReLU (iPReLU=1: per-channel, iPReLU=2: per-layer) or plain ReLU to input_t."""
    init = 0.25  # initial PReLU alpha
    if iPReLU == 1:  # one alpha for each channel
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    elif iPReLU == 2:  # just one alpha for each layer
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    else:
        output_t = Activation('relu')(input_t)
    return output_t 
Example 35
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t, iPReLU=0):
    """Apply PReLU (iPReLU=1: per-channel, iPReLU=2: per-layer) or plain ReLU to input_t."""
    init = 0.25  # initial PReLU alpha
    if iPReLU == 1:  # one alpha for each channel
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    elif iPReLU == 2:  # just one alpha for each layer
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    else:
        output_t = Activation('relu')(input_t)
    return output_t 
Example 36
Project: CNNArt   Author: thomaskuestner   File: 3D_CNN.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t, iPReLU=0):
    """Apply PReLU (iPReLU=1: per-channel, iPReLU=2: per-layer) or plain ReLU to input_t."""
    init = 0.25  # initial PReLU alpha
    if iPReLU == 1:  # one alpha for each channel
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    elif iPReLU == 2:  # just one alpha for each layer
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    else:
        output_t = Activation('relu')(input_t)
    return output_t 
Example 37
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t, iPReLU=0):
    """Apply PReLU (iPReLU=1: per-channel, iPReLU=2: per-layer) or plain ReLU to input_t."""
    init = 0.25  # initial PReLU alpha
    if iPReLU == 1:  # one alpha for each channel
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    elif iPReLU == 2:  # just one alpha for each layer
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    else:
        output_t = Activation('relu')(input_t)
    return output_t 
Example 38
Project: CNNArt   Author: thomaskuestner   File: motion_CNN3DmoreLayers.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t,  iPReLU=0):
    """Apply PReLU (iPReLU=1: per-channel, iPReLU=2: per-layer) or plain ReLU to input_t."""
    init=0.25  # initial PReLU alpha
    if iPReLU == 1:  # one alpha for each channel
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    elif iPReLU == 2:  # just one alpha for each layer
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    else:
        output_t = Activation('relu')(input_t)
    return output_t 
Example 39
Project: CNNArt   Author: thomaskuestner   File: motion_all_CNN2D_multiscale.py    Apache License 2.0 5 votes vote down vote up
def fConveBlock(conv_input,l1_reg=0.0, l2_reg=1e-6, dr_rate=0):
    """Three Dropout -> Conv2D -> ReLU stages.

    Kernel sizes, strides and filter counts come from fgetKernels(),
    fgetStrides() and fgetKernelNumber(); all parameters are identical to
    the original 2D CNN.
    """
    Kernels = fgetKernels()
    Strides = fgetStrides()
    KernelNumber = fgetKernelNumber()

    out = conv_input
    for stage in range(3):
        dropped = Dropout(dr_rate)(out)
        conved = Conv2D(KernelNumber[stage],
                        kernel_size=Kernels[stage],
                        kernel_initializer='he_normal',
                        weights=None,
                        padding='valid',
                        strides=Strides[stage],
                        kernel_regularizer=l1_l2(l1_reg, l2_reg)
                        )(dropped)
        out = Activation('relu')(conved)
    return out
Example 40
Project: CNNArt   Author: thomaskuestner   File: motion_VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fGetActivation(input_t,  iPReLU=0):
    """Apply PReLU (iPReLU=1: per-channel, iPReLU=2: per-layer) or plain ReLU to input_t."""
    init=0.25  # initial PReLU alpha
    if iPReLU == 1:  # one alpha for each channel
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4])(input_t)
    elif iPReLU == 2:  # just one alpha for each layer
        output_t = PReLU(alpha_initializer=Constant(value=init), shared_axes=[2, 3, 4, 1])(input_t)
    else:
        output_t = Activation('relu')(input_t)
    return output_t 
Example 41
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb_transfer_fullupdate.py    MIT License 5 votes vote down vote up
def get_model():
    """Build the 1D-CNN binary classifier for the 187-sample PTBDB beats."""
    nclass = 1
    inp = Input(shape=(187, 1))

    # Four conv stages: each is two valid-padding relu convs, then
    # (max/global) pooling and dropout.
    x = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    x = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(x)
    x = MaxPool1D(pool_size=2)(x)
    x = Dropout(rate=0.1)(x)
    for _ in range(2):
        x = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(x)
        x = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(x)
        x = MaxPool1D(pool_size=2)(x)
        x = Dropout(rate=0.1)(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.2)(x)

    # Dense head ending in a single sigmoid unit (binary output).
    x = Dense(64, activation=activations.relu, name="dense_1")(x)
    x = Dense(64, activation=activations.relu, name="dense_2")(x)
    out = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(x)

    model = models.Model(inputs=inp, outputs=out)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 42
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb.py    MIT License 5 votes vote down vote up
def get_model():
    """Build the PTB-DB binary beat classifier (baseline, no transfer).

    Stacked 1-D conv blocks over a 187-sample beat, global max pooling,
    two 64-unit dense layers, and a single sigmoid output.
    """
    nclass = 1
    inp = Input(shape=(187, 1))

    x = inp
    # Three conv blocks: two ReLU convs, 2x max-pool, light dropout each.
    for filters, ksize in ((16, 5), (32, 3), (32, 3)):
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = MaxPool1D(pool_size=2)(x)
        x = Dropout(rate=0.1)(x)

    # Wide final conv pair feeding a global max pool.
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.2)(x)

    x = Dense(64, activation=activations.relu, name="dense_1")(x)
    x = Dense(64, activation=activations.relu, name="dense_2")(x)
    out = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(x)

    model = models.Model(inputs=inp, outputs=out)
    model.compile(optimizer=optimizers.Adam(0.001), loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 43
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb_transfer_freeze.py    MIT License 5 votes vote down vote up
def get_model():
    """Build the PTB-DB classifier for transfer learning with frozen convs.

    Same topology as the baseline, but every convolution is created with
    trainable=False so only the dense head is updated after weights are
    loaded from a pretrained model.
    """
    nclass = 1
    inp = Input(shape=(187, 1))

    x = inp
    # Three frozen conv blocks: two ReLU convs, 2x max-pool, light dropout each.
    for filters, ksize in ((16, 5), (32, 3), (32, 3)):
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid", trainable=False)(x)
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid", trainable=False)(x)
        x = MaxPool1D(pool_size=2)(x)
        x = Dropout(rate=0.1)(x)

    # Frozen wide conv pair feeding a global max pool.
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.2)(x)

    # Dense head stays trainable.
    x = Dense(64, activation=activations.relu, name="dense_1")(x)
    x = Dense(64, activation=activations.relu, name="dense_2")(x)
    out = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(x)

    model = models.Model(inputs=inp, outputs=out)
    model.compile(optimizer=optimizers.Adam(0.001), loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 44
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_mitbih.py    MIT License 5 votes vote down vote up
def get_model():
    """Build the MIT-BIH 5-class beat classifier.

    Stacked 1-D conv blocks over a 187-sample beat, global max pooling,
    two 64-unit dense layers, and a 5-way softmax output trained with
    sparse categorical cross-entropy.
    """
    nclass = 5
    inp = Input(shape=(187, 1))

    x = inp
    # Three conv blocks: two ReLU convs, 2x max-pool, light dropout each.
    for filters, ksize in ((16, 5), (32, 3), (32, 3)):
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = MaxPool1D(pool_size=2)(x)
        x = Dropout(rate=0.1)(x)

    # Wide final conv pair feeding a global max pool.
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.2)(x)

    x = Dense(64, activation=activations.relu, name="dense_1")(x)
    x = Dense(64, activation=activations.relu, name="dense_2")(x)
    out = Dense(nclass, activation=activations.softmax, name="dense_3_mitbih")(x)

    model = models.Model(inputs=inp, outputs=out)
    model.compile(optimizer=optimizers.Adam(0.001), loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 45
Project: steppy-toolkit   Author: neptune-ml   File: architectures.py    MIT License 5 votes vote down vote up
def prelu_block(use_prelu):
    """Return a tensor-transforming function: PReLU if use_prelu else plain ReLU."""
    def apply(tensor):
        layer = PReLU() if use_prelu else Lambda(relu)
        return layer(tensor)

    return apply
Example 46
Project: TF_PG_GANS   Author: naykun   File: model.py    MIT License 5 votes vote down vote up
def vlrelu(x):
    """'Very leaky' ReLU: identity for positives, slope 0.3 for negatives."""
    return K.relu(x, alpha=0.3)
Example 47
Project: EEG_classification   Author: CVxTz   File: models.py    Apache License 2.0 5 votes vote down vote up
def get_model():
    """Build the single-epoch EEG sleep-stage classifier.

    Stacked 1-D conv blocks over a 3000-sample epoch, global max pooling,
    a small dense head with dropout, and a 5-way softmax output.
    """
    nclass = 5
    inp = Input(shape=(3000, 1))

    x = inp
    # Three conv blocks: two ReLU convs, 2x max-pool, light spatial dropout.
    for filters, ksize in ((16, 5), (32, 3), (32, 3)):
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = MaxPool1D(pool_size=2)(x)
        x = SpatialDropout1D(rate=0.01)(x)

    # Wide final conv pair feeding a global max pool.
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.01)(x)

    x = Dropout(rate=0.01)(Dense(64, activation=activations.relu, name="dense_1")(x))
    x = Dropout(rate=0.05)(Dense(64, activation=activations.relu, name="dense_2")(x))
    out = Dense(nclass, activation=activations.softmax, name="dense_3")(x)

    model = models.Model(inputs=inp, outputs=out)
    model.compile(optimizer=optimizers.Adam(0.001), loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 48
Project: EEG_classification   Author: CVxTz   File: models.py    Apache License 2.0 5 votes vote down vote up
def get_base_model():
    """Build the per-epoch EEG feature extractor.

    Maps one 3000-sample epoch to a 64-dimensional feature vector via
    stacked 1-D conv blocks and global max pooling; used as the
    TimeDistributed backbone of the sequence models.
    """
    inp = Input(shape=(3000, 1))

    x = inp
    # Three conv blocks: two ReLU convs, 2x max-pool, light spatial dropout.
    for filters, ksize in ((16, 5), (32, 3), (32, 3)):
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = Convolution1D(filters, kernel_size=ksize, activation=activations.relu, padding="valid")(x)
        x = MaxPool1D(pool_size=2)(x)
        x = SpatialDropout1D(rate=0.01)(x)

    # Wide final conv pair feeding a global max pool.
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.01)(x)

    features = Dropout(0.01)(Dense(64, activation=activations.relu, name="dense_1")(x))

    base_model = models.Model(inputs=inp, outputs=features)
    base_model.compile(optimizer=optimizers.Adam(0.001), loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    return base_model
Example 49
Project: EEG_classification   Author: CVxTz   File: models.py    Apache License 2.0 5 votes vote down vote up
def get_model_cnn():
    """Build the sequence-level sleep-stage model.

    Applies the per-epoch CNN backbone across a variable-length sequence of
    epochs, smooths with two temporal convolutions, and emits a per-epoch
    5-way softmax via a final 1-D convolution.
    """
    nclass = 5

    seq_input = Input(shape=(None, 3000, 1))
    # Encode each 3000-sample epoch independently with the shared backbone.
    x = TimeDistributed(get_base_model())(seq_input)

    x = Convolution1D(128, kernel_size=3, activation="relu", padding="same")(x)
    x = SpatialDropout1D(rate=0.01)(x)
    x = Convolution1D(128, kernel_size=3, activation="relu", padding="same")(x)
    x = Dropout(rate=0.05)(x)

    # Per-timestep classification head.
    out = Convolution1D(nclass, kernel_size=3, activation="softmax", padding="same")(x)

    model = models.Model(seq_input, out)
    model.compile(optimizers.Adam(0.001), losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()

    return model
Example 50
Project: EEG_classification   Author: CVxTz   File: models.py    Apache License 2.0 5 votes vote down vote up
def get_model_cnn_crf(lr=0.001):
    """Build the sequence-level sleep-stage model with a CRF output layer.

    Same backbone and temporal convolutions as get_model_cnn, but the
    second conv is linear and the per-epoch labels are decoded by a CRF
    trained with its own loss/accuracy.

    Args:
        lr: Adam learning rate.
    """
    nclass = 5

    seq_input = Input(shape=(None, 3000, 1))
    # Encode each 3000-sample epoch independently with the shared backbone.
    x = TimeDistributed(get_base_model())(seq_input)

    x = Convolution1D(128, kernel_size=3, activation="relu", padding="same")(x)
    x = SpatialDropout1D(rate=0.01)(x)
    # Linear activation: the CRF consumes unsquashed unary potentials.
    x = Convolution1D(128, kernel_size=3, activation="linear", padding="same")(x)
    x = Dropout(rate=0.05)(x)

    crf = CRF(nclass, sparse_target=True)
    out = crf(x)

    model = models.Model(seq_input, out)
    model.compile(optimizers.Adam(lr), crf.loss_function, metrics=[crf.accuracy])
    model.summary()

    return model
Example 51
Project: talos   Author: autonomio   File: params.py    MIT License 5 votes vote down vote up
def titanic():
    """Return the Talos hyperparameter search space for the Titanic example.

    Tuple values are continuous range boundaries (per Talos convention),
    list values are explicit discrete choices.
    """
    return dict(
        lr=(0.5, 5, 10),
        first_neuron=[4, 8, 16],
        batch_size=[20, 30, 40],
        dropout=(0, 0.5, 5),
        optimizer=['Adam', 'Nadam'],
        losses=['logcosh', 'binary_crossentropy'],
        activation=['relu', 'elu'],
        last_activation=['sigmoid'],
    )
Example 52
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: tf_models.py    MIT License 5 votes vote down vote up
def temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len, 
                        causal=False, loss='categorical_crossentropy', 
                        optimizer='adam', return_param_str=False):
    """ Used in paper: 
    Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation
    Lea et al. ECCV 2016

    Note: Spatial dropout was not used in the original paper. 
    It tends to improve performance a little.  

    Args:
        n_nodes: number of convolution filters.
        conv_len: temporal convolution length.
        n_classes: number of output action classes.
        n_feat: per-frame feature dimensionality.
        max_len: (maximum) sequence length.
        causal: if True, left-pad before the conv and crop after so no
            future frames leak into the prediction.
        loss, optimizer: passed straight to model.compile.
        return_param_str: if True, also return a short config string.

    Returns:
        The compiled Model, or (model, param_str) if return_param_str.
    """

    inputs = Input(shape=(max_len,n_feat))
    model = inputs
    # Bug fix: the original referenced `model` before assignment when
    # causal=True (UnboundLocalError) and then convolved the unpadded
    # `inputs`, silently discarding the causal padding. The chain is now
    # pad -> conv -> crop, matching the ED_TCN implementation.
    if causal: model = ZeroPadding1D((conv_len//2,0))(model)
    model = Convolution1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, border_mode='same', activation='relu')(model)
    if causal: model = Cropping1D((0,conv_len//2))(model)

    model = SpatialDropout1D(0.3)(model)

    # Per-timestep softmax over the action classes.
    model = TimeDistributed(Dense(n_classes, activation="softmax" ))(model)
    
    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal")

    if return_param_str:
        param_str = "tConv_C{}".format(conv_len)
        if causal:
            param_str += "_causal"
    
        return model, param_str
    else:
        return model
Example 53
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: tf_models.py    MIT License 5 votes vote down vote up
def convolution_module(model, n_nodes, conv_len, dropout_ratio=0.3,
                       activation='norm_relu'):
    """One temporal conv block: Conv1D, optional spatial dropout, then a
    selectable activation ('norm_relu', 'wavenet', 'charbonnier', 'swish',
    'leaky_relu', 'pm', 'sqrt', or any plain Keras activation name)."""
    out = Conv1D(n_nodes, conv_len, padding='same')(model)
    if dropout_ratio != 0:
        out = SpatialDropout1D(dropout_ratio)(out)

    # Dispatch on the requested activation.
    if activation == 'norm_relu':
        # ReLU followed by per-channel max-normalization.
        out = Lambda(channel_normalization)(Activation('relu')(out))
    elif activation == 'wavenet':
        out = WaveNet_activation(out)
    elif activation == 'charbonnier':
        out = CharbonnierAcfun()(out)
    elif activation == 'swish':
        out = SwishAcfun()(out)
    elif activation == 'leaky_relu':
        out = keras.layers.LeakyReLU(alpha=0.2)(out)
    elif activation == 'pm':
        out = PMAcfun()(out)
    elif activation == 'sqrt':
        out = SqrtAcfun()(out)
    else:
        # Fall back to a standard Keras activation string.
        out = Activation(activation)(out)

    return out
Example 54
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1,
                 use_normalization=False,
                 activation=None,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure the RPBinaryPooling2 layer.

        Args:
            n_basis: number of projection bases; output dim is n_basis**2.
            n_components: number of random-projection components.
            use_normalization: whether pooled features are normalized.
            activation: None (linear), 'tanh' or 'relu'.
            out_fusion_type: temporal fusion mode: 'avg', 'max' or 'w-sum'.
            stride: temporal stride of the pooling window.
            time_window_size: temporal window length.
            **kwargs: forwarded to the Keras Layer base class.
        """
        self.n_basis = n_basis
        self.out_dim = n_basis**2
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size

        # PEP 8: identity comparison for None (was `activation == None`).
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        # NOTE(review): any other activation string leaves self.act_fun unset
        # (later AttributeError) — confirm whether a ValueError is preferable.

        super(RPBinaryPooling2, self).__init__(**kwargs)
Example 55
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1, 
                 init_sigma=None,
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure the RPGaussianPooling layer.

        Args:
            n_basis: number of projection bases; output dim is n_basis**2.
            n_components: number of random-projection components.
            init_sigma: initial sigma value (stored; used in build/call,
                not visible here).
            use_normalization: whether pooled features are normalized.
            activation: None (linear), 'tanh' or 'relu'.
            learnable_radius: whether the radius parameter is trainable.
            out_fusion_type: temporal fusion mode: 'avg', 'max' or 'w-sum'.
            stride: temporal stride of the pooling window.
            time_window_size: temporal window length.
            **kwargs: forwarded to the Keras Layer base class.
        """
        self.n_basis = n_basis
        self.out_dim = n_basis**2
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        self.learnable_radius = learnable_radius
        self.init_sigma = init_sigma

        # PEP 8: identity comparison for None (was `activation == None`).
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        # NOTE(review): any other activation string leaves self.act_fun unset
        # (later AttributeError) — confirm whether a ValueError is preferable.

        super(RPGaussianPooling, self).__init__(**kwargs)
Example 56
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1, 
                 init_sigma=None,
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure the RPGaussianPooling2 layer.

        Args:
            n_basis: number of projection bases.
            n_components: components; output dim is n_components * n_basis**2.
            init_sigma: initial sigma value (stored; used in build/call,
                not visible here).
            use_normalization: whether pooled features are normalized.
            activation: None (linear), 'tanh' or 'relu'.
            learnable_radius: whether the radius parameter is trainable.
            out_fusion_type: temporal fusion mode: 'avg', 'max' or 'w-sum'.
            stride: temporal stride of the pooling window.
            time_window_size: temporal window length.
            **kwargs: forwarded to the Keras Layer base class.
        """
        self.n_basis = n_basis
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        # Bug fix: the original hard-coded True here, silently ignoring the
        # learnable_radius argument; store the caller's value as the sibling
        # pooling classes do.
        self.learnable_radius = learnable_radius
        self.init_sigma = init_sigma
        self.out_dim = n_components*(n_basis)**2

        # PEP 8: identity comparison for None (was `activation == None`).
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        # NOTE(review): any other activation string leaves self.act_fun unset
        # (later AttributeError) — confirm whether a ValueError is preferable.

        super(RPGaussianPooling2, self).__init__(**kwargs)
Example 57
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1, 
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure the RPLearnable pooling layer.

        Args:
            n_basis: number of projection bases.
            n_components: components; output dim is n_components * n_basis**2.
            use_normalization: whether pooled features are normalized.
            activation: None (linear), 'tanh' or 'relu'.
            learnable_radius: whether the radius parameter is trainable.
            out_fusion_type: temporal fusion mode: 'avg', 'max' or 'w-sum'.
            stride: temporal stride of the pooling window.
            time_window_size: temporal window length.
            **kwargs: forwarded to the Keras Layer base class.
        """
        self.n_basis = n_basis
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        self.learnable_radius = learnable_radius
        self.out_dim = n_components*(n_basis)**2

        # PEP 8: identity comparison for None (was `activation == None`).
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        # NOTE(review): any other activation string leaves self.act_fun unset
        # (later AttributeError) — confirm whether a ValueError is preferable.

        super(RPLearnable, self).__init__(**kwargs)
Example 58
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1,
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure the MultiModalLowRankPooling layer.

        Args:
            n_basis: number of projection bases; output dim is n_basis.
            n_components: number of low-rank components.
            use_normalization: whether pooled features are normalized.
            activation: None (linear), 'tanh' or 'relu'.
            learnable_radius: whether the radius parameter is trainable.
            out_fusion_type: temporal fusion mode: 'avg', 'max' or 'w-sum'.
            stride: temporal stride of the pooling window.
            time_window_size: temporal window length.
            **kwargs: forwarded to the Keras Layer base class.
        """
        self.n_basis = n_basis
        self.out_dim = n_basis
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        self.learnable_radius = learnable_radius
        self.n_components = n_components

        # PEP 8: identity comparison for None (was `activation == None`).
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        # NOTE(review): any other activation string leaves self.act_fun unset
        # (later AttributeError) — confirm whether a ValueError is preferable.

        super(MultiModalLowRankPooling, self).__init__(**kwargs)
Example 59
Project: open-solution-toxic-comments   Author: minerva-ml   File: models.py    MIT License 5 votes vote down vote up
def _prelu(use_prelu):
    """Return a tensor-transforming function: PReLU if use_prelu else plain ReLU."""
    if use_prelu:
        return lambda tensor: PReLU()(tensor)
    return lambda tensor: Lambda(relu)(tensor)
Example 60
Project: Keras-progressive_growing_of_gans   Author: MSC-BUAA   File: model.py    MIT License 5 votes vote down vote up
def vlrelu(x):
    """'Very leaky' ReLU: identity for positives, slope 0.3 for negatives."""
    return K.relu(x, alpha=0.3)
Example 61
Project: DeepScope   Author: miguelesteras   File: gan_test_bugs.py    MIT License 5 votes vote down vote up
def filt(inobj, nfilt, k, BN=False, act=True, prelu=False, dilation=1):
    """L1L2-regularized Conv2D with optional batch norm and PReLU/ReLU."""
    out = Convolution2D(
        nfilt, k,
        strides=(1, 1),
        activation=None,
        border_mode='same',
        init=he_normal(),
        dilation_rate=dilation,
        kernel_regularizer=L1L2(l1=1e-7, l2=1e-7),
    )(inobj)

    if BN:
        out = BatchNormalization(axis=-1)(out)
    if act:
        out = PReLU(shared_axes=[1, 2])(out) if prelu else Activation('relu')(out)

    return out
Example 62
Project: DeepScope   Author: miguelesteras   File: gan_test_bugs.py    MIT License 5 votes vote down vote up
def incep_module(input_layer, feature_size=32):
    """Two-branch inception-style residual block.

    Branch A: 1x1 conv. Branch B: 1x1 then 3x3 conv. The concatenated
    branches pass through a 7x7 projection, are scaled by 0.1, added back
    to the input, then batch-normalized and PReLU-activated.
    """
    branch_a = filt(input_layer, 32, 1, BN=True, prelu=True)

    branch_b = filt(input_layer, 32, 1, BN=True, prelu=True)
    branch_b = filt(branch_b, 32, 3, BN=True, prelu=True)

    merged = concatenate([branch_a, branch_b])
    merged = filt(merged, feature_size, 7, BN=True, act=False)

    # Residual scaling suggested by the paper.
    merged = Lambda(lambda t: 0.1 * t)(merged)

    residual = add([input_layer, merged])
    residual = BatchNormalization(axis=-1)(residual)
    return PReLU(shared_axes=[1, 2])(residual)
Example 63
Project: ip9   Author: tiefenauer   File: models.py    MIT License 5 votes vote down vote up
def clipped_relu(x, max_value=20):
    """ReLU clipped at ``max_value``.

    Generalized from the original hard-coded ceiling: the default of 20
    preserves the previous behavior, while callers may now choose a
    different clip value.
    """
    return relu(x, max_value=max_value)
Example 64
Project: algorimp   Author: marczellm   File: _main.py    GNU General Public License v3.0 5 votes vote down vote up
def _build_net(self) -> keras.models.Model:
        """Build a one-hidden-layer MLP with three softmax output heads
        (pitch, time-steps-before-quarter, duration-quarters); also sets
        self.epochs and self.outfuns as training/sampling side effects."""
        inputs = keras.layers.Input(shape=self.inputshape())
        hidden = keras.layers.Dense(800, activation=relu)(inputs)

        # One softmax head per predicted quantity.
        heads = [
            keras.layers.Dense(127, activation=softmax)(hidden),
            keras.layers.Dense(self.maxtsbq + 1, activation=softmax)(hidden),
            keras.layers.Dense(self.maxdq + 1, activation=softmax)(hidden),
        ]

        model = keras.models.Model(inputs=inputs, outputs=heads)
        model.compile(optimizer=adagrad(), loss=categorical_crossentropy)

        self.epochs = 20
        self.outfuns = sampler(.3), weighted_nlargest(2), weighted_nlargest(2)
        return model
Example 65
Project: algorimp   Author: marczellm   File: _main.py    GNU General Public License v3.0 5 votes vote down vote up
def _build_net(self) -> keras.models.Model:
        """Build a two-hidden-layer MLP with three softmax output heads
        (pitch, time-steps-before-quarter, duration-quarters); also sets
        self.epochs and self.outfuns as training/sampling side effects."""
        inputs = keras.layers.Input(shape=self.inputshape())
        hidden = keras.layers.Dense(800, activation=relu)(inputs)
        hidden = keras.layers.Dense(800, activation=relu)(hidden)

        # One softmax head per predicted quantity.
        heads = [
            keras.layers.Dense(127, activation=softmax)(hidden),
            keras.layers.Dense(self.maxtsbq + 1, activation=softmax)(hidden),
            keras.layers.Dense(self.maxdq + 1, activation=softmax)(hidden),
        ]

        model = keras.models.Model(inputs=inputs, outputs=heads)
        model.compile(optimizer=adagrad(), loss=categorical_crossentropy)

        self.epochs = 25
        self.outfuns = (sampler(.5),) * 3
        return model
Example 66
Project: TCFPN-ISBA   Author: Zephyr-D   File: tf_models.py    MIT License 4 votes vote down vote up
def ED_TCN(n_nodes, conv_len, n_classes, n_feat, max_len,
           loss='categorical_crossentropy', causal=False,
           optimizer="rmsprop", activation='norm_relu',
           return_param_str=False):
    """Encoder-Decoder Temporal Convolutional Network.

    Args:
        n_nodes: filters per encoder layer (list); its length sets the depth.
        conv_len: temporal convolution length.
        n_classes: number of output action classes.
        n_feat: per-frame feature dimensionality.
        max_len: sequence length of the input.
        loss, optimizer: passed to model.compile.
        causal: if True, left-pad before each conv and crop after so no
            future frames influence a prediction.
        activation: 'norm_relu' (ReLU + channel normalization), 'wavenet',
            or any plain Keras activation name.
        return_param_str: if True, also return a short config string.

    Returns:
        The compiled Model, or (model, param_str) if return_param_str.
    """
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    model = inputs

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
        model = Conv1D(n_nodes[i], conv_len, padding='same')(model)
        if causal: model = Cropping1D((0, conv_len // 2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

        # Halve temporal resolution after each encoder layer.
        model = MaxPooling1D(2)(model)

    # ---- Decoder ----
    # Mirrors the encoder: upsample, then conv with filter counts reversed.
    for i in range(n_layers):
        model = UpSampling1D(2)(model)
        if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
        model = Conv1D(n_nodes[-i - 1], conv_len, padding='same')(model)
        if causal: model = Cropping1D((0, conv_len // 2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="decoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(inputs=inputs, outputs=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal")

    if return_param_str:
        param_str = "ED-TCN_C{}_L{}".format(conv_len, n_layers)
        if causal:
            param_str += "_causal"

        return model, param_str
    else:
        return model
Example 67
Project: TCFPN-ISBA   Author: Zephyr-D   File: tf_models.py    MIT License 4 votes vote down vote up
def ED_TCN_atrous(n_nodes, conv_len, n_classes, n_feat, max_len,
                  loss='categorical_crossentropy', causal=False,
                  optimizer="rmsprop", activation='norm_relu',
                  return_param_str=False):
    """Encoder-Decoder TCN using atrous (dilated) convolutions.

    Like ED_TCN, but replaces pooling/upsampling with increasing (encoder)
    and decreasing (decoder) dilation rates, and accepts variable-length
    sequences (max_len is unused in the input shape).

    Args:
        n_nodes: filters per encoder layer (list); its length sets the depth.
        conv_len: temporal convolution length.
        n_classes: number of output action classes.
        n_feat: per-frame feature dimensionality.
        max_len: unused here; kept for signature parity with ED_TCN.
        loss, optimizer: passed to model.compile.
        causal: if True, left-pad before each conv and crop after.
        activation: 'norm_relu', 'wavenet', or a plain Keras activation name.
        return_param_str: if True, also return a short config string.

    Returns:
        The compiled Model, or (model, param_str) if return_param_str.
    """
    n_layers = len(n_nodes)

    inputs = Input(shape=(None, n_feat))
    model = inputs

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
        model = AtrousConv1D(n_nodes[i], conv_len, atrous_rate=i + 1, padding='same')(model)
        if causal: model = Cropping1D((0, conv_len // 2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # ---- Decoder ----
    # Mirrors the encoder with reversed filter counts and dilation rates.
    for i in range(n_layers):
        if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
        model = AtrousConv1D(n_nodes[-i - 1], conv_len, atrous_rate=n_layers - i, padding='same')(model)
        if causal: model = Cropping1D((0, conv_len // 2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="decoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    # Consistency fix: use the Keras 2 keyword names (inputs=/outputs=) as
    # ED_TCN in this file does; the old input=/output= spelling is removed
    # in Keras 2.
    model = Model(inputs=inputs, outputs=model)

    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy'])

    if return_param_str:
        param_str = "ED-TCNa_C{}_L{}".format(conv_len, n_layers)
        if causal:
            param_str += "_causal"

        return model, param_str
    else:
        return model
Example 68
Project: TCFPN-ISBA   Author: Zephyr-D   File: tf_models.py    MIT License 4 votes vote down vote up
def TimeDelayNeuralNetwork(n_nodes, conv_len, n_classes, n_feat, max_len,
                           loss='categorical_crossentropy', causal=False,
                           optimizer="rmsprop", activation='sigmoid',
                           return_param_str=False):
    """Time-delay neural network: a stack of atrous convolutions with
    increasing dilation, followed by a per-timestep softmax.

    Args:
        n_nodes: filters per layer (list); its length sets the depth.
        conv_len: temporal convolution length.
        n_classes: number of output action classes.
        n_feat: per-frame feature dimensionality.
        max_len: sequence length of the input.
        loss, optimizer: passed to model.compile.
        causal: if True, left-pad before each conv and crop after.
        activation: 'norm_relu', 'wavenet', or a plain Keras activation name.
        return_param_str: if True, also return a short config string.

    Returns:
        The compiled Model, or (model, param_str) if return_param_str.
    """
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    model = inputs
    # Cleanup: the original also built an unused mask Input and model_masks
    # list that never fed into the Model; both removed.

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
        model = AtrousConv1D(n_nodes[i], conv_len, atrous_rate=i + 1, padding='same')(model)
        if causal: model = Cropping1D((0, conv_len // 2))(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    # Consistency fix: use the Keras 2 keyword names (inputs=/outputs=) as
    # ED_TCN in this file does; the old input=/output= spelling is removed
    # in Keras 2.
    model = Model(inputs=inputs, outputs=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy'])

    if return_param_str:
        param_str = "TDN_C{}".format(conv_len)
        if causal:
            param_str += "_causal"

        return model, param_str
    else:
        return model
Example 69
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0 4 votes vote down vote up
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    """Build and compile the 4-stage 2D MNet-style CNN classifier.

    Args:
        patchSize: 2D array-like; patchSize[0] holds the (x, y, z) patch
            extents (z is reinterpreted as the 2D channel axis).
        learningRate: learning rate forwarded to fGetOptimizerAndLoss.
        optimizer: optimizer name forwarded to fGetOptimizerAndLoss.
        dr_rate, input_dr_rate, max_norm, iPReLU: unused here; kept so the
            signature matches the sibling model builders.
        l2_reg: NOTE(review) this argument is immediately overridden to
            1e-4 below, so the caller's value is ignored; kept only for
            interface compatibility — confirm whether that is intended.

    Returns:
        The compiled keras Model.
    """
    l2_reg = 1e-4  # hard-coded; shadows the parameter (see docstring)

    # 4 stages, 2 conv blocks each (~378,722 params for 40x40x10 patches).
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))
    # Move z to the front and drop the singleton channel so the z extent
    # becomes the number of 2D channels.
    input2D_t = Permute((4, 1, 2, 3))(input_t)
    input2D_t = Reshape(target_shape=(int(patchSize[0, 2]), int(patchSize[0, 0]), int(patchSize[0, 1])))(
        input2D_t)
    twoD_t = Conv2D(16,
                    kernel_size=(7, 7),
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    strides=(1, 1)
                    )(input2D_t)
    twoD_t = Activation('relu')(twoD_t)

    # Downscaled copies of the stem output, concatenated back in at the
    # matching pyramid stage below.
    l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
    l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
    l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

    stage1_res1_t = fCreateMNet_Block(twoD_t, 16, kernel_size=(3, 3), forwarding=True, l2_reg=l2_reg)
    stage1_res2_t = fCreateMNet_Block(stage1_res1_t, 32, kernel_size=(3, 3), forwarding=False, l2_reg=l2_reg)

    stage2_inp_t = fCreateMaxPooling2D(stage1_res2_t, stride=(2, 2))
    stage2_inp_t = concatenate([stage2_inp_t, l_w2_t], axis=1)
    stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    # NOTE(review): l2_reg is not passed here, unlike every other block
    # call in this function — confirm whether that is intentional.
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    # Two-way softmax classifier head.
    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg)

    # BUG FIX: the function previously built and compiled the model but
    # never returned it, so callers received None.
    return cnn
Example 70
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0 4 votes vote down vote up
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    """Build and compile the 4-stage 2D MNet-style CNN classifier.

    Args:
        patchSize: 2D array-like; patchSize[0] holds the (x, y, z) patch
            extents (z is reinterpreted as the 2D channel axis).
        learningRate: learning rate forwarded to fGetOptimizerAndLoss.
        optimizer: optimizer name forwarded to fGetOptimizerAndLoss.
        dr_rate, input_dr_rate, max_norm, iPReLU: unused here; kept so the
            signature matches the sibling model builders.
        l2_reg: NOTE(review) this argument is immediately overridden to
            1e-4 below, so the caller's value is ignored; kept only for
            interface compatibility — confirm whether that is intended.

    Returns:
        The compiled keras Model.
    """
    l2_reg = 1e-4  # hard-coded; shadows the parameter (see docstring)

    # 4 stages, 2 conv blocks each (~378,722 params for 40x40x10 patches).
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))
    # Move z to the front and drop the singleton channel so the z extent
    # becomes the number of 2D channels.
    input2D_t = Permute((4, 1, 2, 3))(input_t)
    input2D_t = Reshape(target_shape=(int(patchSize[0, 2]), int(patchSize[0, 0]), int(patchSize[0, 1])))(
        input2D_t)
    twoD_t = Conv2D(16,
                    kernel_size=(7, 7),
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    strides=(1, 1)
                    )(input2D_t)
    twoD_t = Activation('relu')(twoD_t)

    # Downscaled copies of the stem output, concatenated back in at the
    # matching pyramid stage below.
    l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
    l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
    l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

    stage1_res1_t = fCreateMNet_Block(twoD_t, 16, kernel_size=(3, 3), forwarding=True, l2_reg=l2_reg)
    stage1_res2_t = fCreateMNet_Block(stage1_res1_t, 32, kernel_size=(3, 3), forwarding=False, l2_reg=l2_reg)

    stage2_inp_t = fCreateMaxPooling2D(stage1_res2_t, stride=(2, 2))
    stage2_inp_t = concatenate([stage2_inp_t, l_w2_t], axis=1)
    stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    # NOTE(review): l2_reg is not passed here, unlike every other block
    # call in this function — confirm whether that is intentional.
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    # Two-way softmax classifier head.
    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg)

    # BUG FIX: the function previously built and compiled the model but
    # never returned it, so callers received None.
    return cnn
Example 71
Project: CNNArt   Author: thomaskuestner   File: motion_MNetArt.py    Apache License 2.0 4 votes vote down vote up
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    """Build and compile the 4-stage 2D MNet-style motion-artifact CNN.

    Args:
        patchSize: 2D array-like; patchSize[0] holds the (x, y, z) patch
            extents (z is reinterpreted as the 2D channel axis).
        learningRate: learning rate forwarded to fGetOptimizerAndLoss.
        optimizer: optimizer name forwarded to fGetOptimizerAndLoss.
        dr_rate, input_dr_rate, max_norm, iPReLU: unused here; kept so the
            signature matches the sibling model builders.
        l2_reg: NOTE(review) this argument is immediately overridden to
            1e-4 below, so the caller's value is ignored; kept only for
            interface compatibility — confirm whether that is intended.

    Returns:
        The compiled keras Model.
    """
    l2_reg = 1e-4  # hard-coded; shadows the parameter (see docstring)

    # 4 stages, 2 conv blocks each (~378,722 params for 40x40x10 patches).
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))
    # Move z to the front and drop the singleton channel so the z extent
    # becomes the number of 2D channels.
    input2D_t = Permute((4, 1, 2, 3))(input_t)
    input2D_t = Reshape(target_shape=(int(patchSize[0, 2]), int(patchSize[0, 0]), int(patchSize[0, 1])))(
        input2D_t)
    twoD_t = Conv2D(16,
                    kernel_size=(7, 7),
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    strides=(1, 1)
                    )(input2D_t)
    twoD_t = Activation('relu')(twoD_t)

    # Downscaled copies of the stem output, concatenated back in at the
    # matching pyramid stage below.
    l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
    l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
    l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

    stage1_res1_t = fCreateMNet_Block(twoD_t, 16, kernel_size=(3, 3), forwarding=True, l2_reg=l2_reg)
    stage1_res2_t = fCreateMNet_Block(stage1_res1_t, 32, kernel_size=(3, 3), forwarding=False, l2_reg=l2_reg)

    stage2_inp_t = fCreateMaxPooling2D(stage1_res2_t, stride=(2, 2))
    stage2_inp_t = concatenate([stage2_inp_t, l_w2_t], axis=1)
    stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    # NOTE(review): l2_reg is not passed here, unlike every other block
    # call in this function — confirm whether that is intentional.
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    # Two-way softmax classifier head.
    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg)

    # BUG FIX: the function previously built and compiled the model but
    # never returned it, so callers received None.
    return cnn
Example 72
Project: TF_PG_GANS   Author: naykun   File: model.py    MIT License 4 votes vote down vote up
def Generator(
    num_channels        =1,
    resolution          =32,
    label_size          =0,
    fmap_base           =4096,
    fmap_decay          =1.0,
    fmap_max            =256,
    latent_size         =None,
    normalize_latents   =True,
    use_wscale          =True,
    use_pixelnorm       =True,
    use_leakyrelu       =True,
    use_batchnorm       =False,
    tanh_at_end         =None,
    **kwargs):
    """Build the progressive-growing GAN generator.

    Grows from a 4x4 base up to `resolution`, emitting one to-image NIN
    branch per resolution level; LODSelectLayer picks/blends between
    levels according to the `cur_lod` variable attached to the model.

    Args:
        num_channels: channels of the generated image.
        resolution: final output resolution; must be a power of two >= 4.
        label_size: if nonzero, a label input is concatenated to the latents.
        fmap_base / fmap_decay / fmap_max: feature-map count schedule.
        latent_size: latent dimensionality; defaults to numf(0).
        normalize_latents: pixel-normalize the latent vector first.
        use_wscale / use_pixelnorm / use_batchnorm: conv-block options.
        use_leakyrelu: leaky ReLU activations if True, else plain ReLU.
        tanh_at_end: if set, apply tanh and scale the output by this factor.
        **kwargs: ignored; accepted for config compatibility.

    Returns:
        keras Model with a `cur_lod` backend variable attribute that
        controls the active level of detail.
    """
    R = int(np.log2(resolution))
    assert resolution == 2 ** R and resolution >= 4
    cur_lod = K.variable(np.float32(0.0), dtype='float32', name='cur_lod')

    def numf(stage):
        # Feature-map count for a stage: decays with depth, capped at fmap_max.
        return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)

    if latent_size is None:
        latent_size = numf(0)
    (act, act_init) = (lrelu, lrelu_init) if use_leakyrelu else (relu, relu_init)

    inputs = [Input(shape=[latent_size], name='Glatents')]
    net = inputs[-1]

    if normalize_latents:
        net = PixelNormLayer(name='Gnorm')(net)
    if label_size:
        inputs += [Input(shape=[label_size], name='Glabels')]
        net = Concatenate(name='G1na')([net, inputs[-1]])
    net = Reshape((1, 1, K.int_shape(net)[1]), name='G1nb')(net)

    # 4x4 base block.
    net = G_convblock(net, numf(1), 4, act, act_init, pad='full', use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1a')
    net = G_convblock(net, numf(1), 3, act, act_init, pad=1, use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1b')
    lods = [net]
    # One upsample plus two conv blocks per additional resolution level.
    for level in range(2, R):
        net = UpSampling2D(2, name='G%dup' % level)(net)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%da' % level)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%db' % level)
        lods += [net]

    # To-image projection per level (finest first), selected by cur_lod.
    lods = [NINblock(l, num_channels, linear, linear_init, use_wscale=use_wscale,
                     name='Glod%d' % i) for i, l in enumerate(reversed(lods))]
    output = LODSelectLayer(cur_lod, name='Glod')(lods)
    if tanh_at_end is not None:
        output = Activation('tanh', name='Gtanh')(output)
        if tanh_at_end != 1.0:
            # BUG FIX: the Lambda layer was constructed but never applied,
            # leaving `output` bound to the layer object instead of a tensor.
            output = Lambda(lambda x: x * tanh_at_end, name='Gtanhs')(output)

    model = Model(inputs=inputs, outputs=[output])
    model.cur_lod = cur_lod
    return model
Example 73
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: tf_models.py    MIT License 4 votes vote down vote up
def ED_TCN(n_nodes, conv_len, n_classes, n_feat, max_len, 
            loss='categorical_crossentropy', causal=False, 
            optimizer="rmsprop", activation='norm_relu',
            return_param_str=False):
    """Encoder-decoder temporal convolutional network (ED-TCN).

    The encoder alternates conv / dropout / activation / max-pool; the
    decoder mirrors it with upsampling. A per-frame softmax head emits
    class probabilities. Returns the compiled model (plus a tag string
    when `return_param_str` is True).
    """
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    net = inputs

    def _activate(x, norm_name):
        # Apply the configured nonlinearity; 'norm_relu' adds channel
        # normalization after the ReLU.
        if activation == 'norm_relu':
            x = Activation('relu')(x)
            return Lambda(channel_normalization, name=norm_name)(x)
        if activation == 'wavenet':
            return WaveNet_activation(x)
        return Activation(activation)(x)

    # ---- Encoder ----
    for layer_idx in range(n_layers):
        if causal:
            # Pad the start so the convolution never sees future frames.
            net = ZeroPadding1D((conv_len // 2, 0))(net)
        net = Convolution1D(n_nodes[layer_idx], conv_len, border_mode='same')(net)
        if causal:
            net = Cropping1D((0, conv_len // 2))(net)
        net = SpatialDropout1D(0.3)(net)
        net = _activate(net, "encoder_norm_{}".format(layer_idx))
        net = MaxPooling1D(2)(net)

    # ---- Decoder ----
    for layer_idx in range(n_layers):
        net = UpSampling1D(2)(net)
        if causal:
            net = ZeroPadding1D((conv_len // 2, 0))(net)
        net = Convolution1D(n_nodes[-layer_idx - 1], conv_len, border_mode='same')(net)
        if causal:
            net = Cropping1D((0, conv_len // 2))(net)
        net = SpatialDropout1D(0.3)(net)
        net = _activate(net, "decoder_norm_{}".format(layer_idx))

    # Per-frame class probabilities.
    net = TimeDistributed(Dense(n_classes, activation="softmax"))(net)

    model = Model(input=inputs, output=net)
    model.compile(loss=loss, optimizer=optimizer,
                  sample_weight_mode="temporal", metrics=['accuracy'])

    if not return_param_str:
        return model
    param_str = "ED-TCN_C{}_L{}".format(conv_len, n_layers)
    if causal:
        param_str += "_causal"
    return model, param_str
Example 74
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: tf_models.py    MIT License 4 votes vote down vote up
def ED_TCN_atrous(n_nodes, conv_len, n_classes, n_feat, max_len, 
                loss='categorical_crossentropy', causal=False, 
                optimizer="rmsprop", activation='norm_relu',
                return_param_str=False):
    """Encoder-decoder TCN built from atrous (dilated) 1D convolutions.

    The dilation rate grows with depth in the encoder and shrinks back in
    the decoder; no pooling/upsampling, so variable-length sequences are
    accepted (time axis is None). Returns the compiled model (plus a tag
    string when `return_param_str` is True).
    """
    n_layers = len(n_nodes)

    inputs = Input(shape=(None, n_feat))
    net = inputs

    def _activate(x, norm_name):
        # Apply the configured nonlinearity; 'norm_relu' adds channel
        # normalization after the ReLU.
        if activation == 'norm_relu':
            x = Activation('relu')(x)
            return Lambda(channel_normalization, name=norm_name)(x)
        if activation == 'wavenet':
            return WaveNet_activation(x)
        return Activation(activation)(x)

    # ---- Encoder: dilation rate 1..n_layers ----
    for layer_idx in range(n_layers):
        if causal:
            # Pad the start so the convolution never sees future frames.
            net = ZeroPadding1D((conv_len // 2, 0))(net)
        net = AtrousConvolution1D(n_nodes[layer_idx], conv_len,
                                  atrous_rate=layer_idx + 1, border_mode='same')(net)
        if causal:
            net = Cropping1D((0, conv_len // 2))(net)
        net = SpatialDropout1D(0.3)(net)
        net = _activate(net, "encoder_norm_{}".format(layer_idx))

    # ---- Decoder: dilation rate n_layers..1 ----
    for layer_idx in range(n_layers):
        if causal:
            net = ZeroPadding1D((conv_len // 2, 0))(net)
        net = AtrousConvolution1D(n_nodes[-layer_idx - 1], conv_len,
                                  atrous_rate=n_layers - layer_idx, border_mode='same')(net)
        if causal:
            net = Cropping1D((0, conv_len // 2))(net)
        net = SpatialDropout1D(0.3)(net)
        net = _activate(net, "decoder_norm_{}".format(layer_idx))

    # Per-frame class probabilities.
    net = TimeDistributed(Dense(n_classes, activation="softmax"))(net)

    model = Model(input=inputs, output=net)
    model.compile(loss=loss, optimizer=optimizer,
                  sample_weight_mode="temporal", metrics=['accuracy'])

    if not return_param_str:
        return model
    param_str = "ED-TCNa_C{}_L{}".format(conv_len, n_layers)
    if causal:
        param_str += "_causal"
    return model, param_str
Example 75
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: tf_models.py    MIT License 4 votes vote down vote up
def TimeDelayNeuralNetwork(n_nodes, conv_len, n_classes, n_feat, max_len, 
                loss='categorical_crossentropy', causal=False, 
                optimizer="rmsprop", activation='sigmoid',
                return_param_str=False):
    """Time-delay neural network: a stack of atrous 1D convolutions with
    increasing dilation, topped by a per-frame softmax classifier.

    Returns the compiled model, plus a parameter-string tag when
    `return_param_str` is True.
    """
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    net = inputs
    # Mask input is constructed for parity with sibling builders but is
    # never wired into the model below.
    inputs_mask = Input(shape=(max_len, 1))
    model_masks = [inputs_mask]

    # ---- Encoder ----
    for layer_idx in range(n_layers):
        if causal:
            # Pad the start so the convolution never sees future frames.
            net = ZeroPadding1D((conv_len // 2, 0))(net)
        net = AtrousConvolution1D(n_nodes[layer_idx], conv_len,
                                  atrous_rate=layer_idx + 1, border_mode='same')(net)
        # model = SpatialDropout1D(0.3)(model)
        if causal:
            net = Cropping1D((0, conv_len // 2))(net)

        if activation == 'norm_relu':
            net = Activation('relu')(net)
            net = Lambda(channel_normalization,
                         name="encoder_norm_{}".format(layer_idx))(net)
        elif activation == 'wavenet':
            net = WaveNet_activation(net)
        else:
            net = Activation(activation)(net)

    # Per-frame class probabilities.
    net = TimeDistributed(Dense(n_classes, activation="softmax"))(net)

    model = Model(input=inputs, output=net)
    model.compile(loss=loss, optimizer=optimizer,
                  sample_weight_mode="temporal", metrics=['accuracy'])

    if not return_param_str:
        return model
    param_str = "TDN_C{}".format(conv_len)
    if causal:
        param_str += "_causal"
    return model, param_str
Example 76
Project: Keras-progressive_growing_of_gans   Author: MSC-BUAA   File: model.py    MIT License 4 votes vote down vote up
def Generator(
    num_channels        =1,
    resolution          =32,
    label_size          =0,
    fmap_base           =4096,
    fmap_decay          =1.0,
    fmap_max            =256,
    latent_size         =None,
    normalize_latents   =True,
    use_wscale          =True,
    use_pixelnorm       =True,
    use_leakyrelu       =True,
    use_batchnorm       =False,
    tanh_at_end         =None,
    **kwargs):
    """Build the progressive-growing GAN generator.

    Grows from a 4x4 base up to `resolution`, emitting one to-image NIN
    branch per resolution level; LODSelectLayer picks/blends between
    levels according to the `cur_lod` variable attached to the model.

    Args:
        num_channels: channels of the generated image.
        resolution: final output resolution; must be a power of two >= 4.
        label_size: if nonzero, a label input is concatenated to the latents.
        fmap_base / fmap_decay / fmap_max: feature-map count schedule.
        latent_size: latent dimensionality; defaults to numf(0).
        normalize_latents: pixel-normalize the latent vector first.
        use_wscale / use_pixelnorm / use_batchnorm: conv-block options.
        use_leakyrelu: leaky ReLU activations if True, else plain ReLU.
        tanh_at_end: if set, apply tanh and scale the output by this factor.
        **kwargs: ignored; accepted for config compatibility.

    Returns:
        keras Model with a `cur_lod` backend variable attribute that
        controls the active level of detail.
    """
    R = int(np.log2(resolution))
    assert resolution == 2 ** R and resolution >= 4
    cur_lod = K.variable(np.float32(0.0), dtype='float32', name='cur_lod')

    def numf(stage):
        # Feature-map count for a stage: decays with depth, capped at fmap_max.
        return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)

    if latent_size is None:
        latent_size = numf(0)
    (act, act_init) = (lrelu, lrelu_init) if use_leakyrelu else (relu, relu_init)

    inputs = [Input(shape=[latent_size], name='Glatents')]
    net = inputs[-1]

    if normalize_latents:
        net = PixelNormLayer(name='Gnorm')(net)
    if label_size:
        inputs += [Input(shape=[label_size], name='Glabels')]
        net = Concatenate(name='G1na')([net, inputs[-1]])
    net = Reshape((1, 1, K.int_shape(net)[1]), name='G1nb')(net)

    # 4x4 base block.
    net = G_convblock(net, numf(1), 4, act, act_init, pad='full', use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1a')
    net = G_convblock(net, numf(1), 3, act, act_init, pad=1, use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1b')
    lods = [net]
    # One upsample plus two conv blocks per additional resolution level.
    for level in range(2, R):
        net = UpSampling2D(2, name='G%dup' % level)(net)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%da' % level)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%db' % level)
        lods += [net]

    # To-image projection per level (finest first), selected by cur_lod.
    lods = [NINblock(l, num_channels, linear, linear_init, use_wscale=use_wscale,
                     name='Glod%d' % i) for i, l in enumerate(reversed(lods))]
    output = LODSelectLayer(cur_lod, name='Glod')(lods)
    if tanh_at_end is not None:
        output = Activation('tanh', name='Gtanh')(output)
        if tanh_at_end != 1.0:
            # BUG FIX: the Lambda layer was constructed but never applied,
            # leaving `output` bound to the layer object instead of a tensor.
            output = Lambda(lambda x: x * tanh_at_end, name='Gtanhs')(output)

    model = Model(inputs=inputs, outputs=[output])
    model.cur_lod = cur_lod
    return model
Example 77
Project: DeepScope   Author: miguelesteras   File: gan_test_bugs.py    MIT License 4 votes vote down vote up
def model_generator(nch=256, edge_len=10):
    """GAN generator: project a flat 6400-dim input to an edge_len x
    edge_len x nch tensor, then refine through inception modules and
    three 2x upsampling stages into a single-channel sigmoid map.
    """
    # L1/L2 regularizer factory; only referenced by the commented-out
    # Conv2DTranspose variants, kept for experimentation.
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)

    inputs = Input(shape=(80 * 80,))

    # Project and reshape the flat input into a spatial feature map.
    x = Dense(nch * edge_len ** 2)(inputs)
    x = BatchNormalization()(x)
    x = Reshape((edge_len, edge_len, nch))(x)

    x = filt(x, 256, 3, BN=True, prelu=True)
    x = filt(x, 32, 3, BN=True, prelu=True)

    # Three upsampling stages, each preceded by two inception modules.
    for _ in range(3):
        x = incep_module(x, 32)
        x = incep_module(x, 32)
        x = UpSampling2D()(x)

    x = incep_module(x, 32)

    # Single-channel output map squashed into [0, 1].
    x = filt(x, 1, 5, BN=False, act=False)
    x = Activation('sigmoid')(x)

    return Model(input=inputs, output=x)