Python keras.optimizers Examples

The following code examples show how to use the keras.optimizers module. They are taken from open-source Python projects.
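
Before the examples, a minimal sketch (assuming Keras 2.x, where learning rates are passed as lr=) of the two common ways to hand an optimizer to model.compile(): a string alias with default settings, or a configured instance from keras.optimizers.

import keras

model = keras.models.Sequential([keras.layers.Dense(10, input_dim=20, activation='softmax')])

# 1) String alias: optimizer with default hyperparameters.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# 2) Configured instance: custom hyperparameters via keras.optimizers.
sgd = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-5, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])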

Example 1
Project: AutoSleepScorerDev   Author: skjerns   File: models.py    GNU General Public License v3.0
def tsinalis(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1)
    """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax', activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(), metrics=[keras.metrics.categorical_accuracy])
    return model 
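
A hypothetical call for context: Conv1D expects a (steps, channels) input shape, so the 15000-sample single-channel epoch mentioned in the docstring would be passed as shown below (shape and class count are illustrative).

# Hypothetical usage of tsinalis(); values are illustrative.
model = tsinalis(input_shape=(15000, 1), n_classes=5)
model.summary()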
Example 2
Project: training_results_v0.6   Author: mlperf   File: __init__.py    Apache License 2.0
def DistributedOptimizer(optimizer, name=None, device_dense='', device_sparse=''):
    """
    An optimizer that wraps another keras.optimizers.Optimizer, using an allreduce to
    average gradient values before applying gradients to model weights.

    Args:
        optimizer: Optimizer to use for computing gradients and applying updates.
        name: Optional name prefix for the operations created when applying
              gradients. Defaults to "Distributed" followed by the provided
              optimizer type.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.
    """
    # We dynamically create a new class that inherits from the optimizer that was passed in.
    # The goal is to override get_gradients() method with an allreduce implementation.
    # This class will have the same name as the optimizer it's wrapping, so that the saved
    # model could be easily restored without Horovod.
    cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
               dict(_DistributedOptimizer.__dict__))
    return cls(name, device_dense, device_sparse, **optimizer.get_config()) 
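
A typical usage sketch, assuming the standard Horovod Keras workflow (hvd.init() plus a learning rate scaled by the worker count); the model itself is assumed to be built elsewhere.

import horovod.keras as hvd
import keras

hvd.init()
# Scale the learning rate by the number of workers, then wrap the optimizer
# so gradients are averaged across workers before each update.
opt = keras.optimizers.SGD(lr=0.01 * hvd.size())
opt = hvd.DistributedOptimizer(opt)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])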
Example 3
Project: evidX   Author: SciKnowEngine   File: keras_spreadsheet_classifier.py    Apache License 2.0
def __init__(self, embedding_matrix, max_seq_len, n_classes, num_filters = 64, weight_decay = 1e-4):

        nb_words = embedding_matrix.shape[0]
        embed_dim = embedding_matrix.shape[1]

        self.model = Sequential()
        self.model.add(Embedding(nb_words, embed_dim,
            weights=[embedding_matrix], input_length=max_seq_len, trainable=False))
        self.model.add(Conv1D(num_filters, 7, activation='relu', padding='same'))
        self.model.add(MaxPooling1D(2))
        self.model.add(Conv1D(num_filters, 7, activation='relu', padding='same'))
        self.model.add(GlobalMaxPooling1D())
        self.model.add(Dropout(0.5))
        self.model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(weight_decay)))
        self.model.add(Dense(n_classes, activation='sigmoid'))  #multi-label (k-hot encoding)

        adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
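        # note: with sigmoid outputs for multi-label targets, 'binary_crossentropy' is the usual pairing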
        self.model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        self.model.summary() 
Example 4
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
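
A short usage sketch for the helper above (hypothetical; 'model' is assumed to be built elsewhere):

opti, loss = fGetOptimizerAndLoss('Adam', learningRate=1e-4)
model.compile(loss=loss, optimizer=opti, metrics=['accuracy'])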
Example 5
Project: CNNArt   Author: thomaskuestner   File: CNN3DmoreLayers.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
Example 6
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
Example 7
Project: CNNArt   Author: thomaskuestner   File: motion_MNetArt.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
Example 8
Project: CNNArt   Author: thomaskuestner   File: motion_CNN3D.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
Example 9
Project: CNNArt   Author: thomaskuestner   File: motion_CNN3DmoreLayers.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
Example 10
Project: CNNArt   Author: thomaskuestner   File: motion_VNetArt.py    Apache License 2.0
def fGetOptimizerAndLoss(optimizer, learningRate=0.001, loss='categorical_crossentropy'):
    if optimizer not in ['Adam', 'SGD', 'Adamax', 'Adagrad', 'Adadelta', 'Nadam', 'RMSprop']:
        print('this optimizer does not exist!!!')
        return None
    loss = 'categorical_crossentropy'

    if optimizer == 'Adamax':  # leave the rest as default values
        opti = keras.optimizers.Adamax(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'SGD':
        opti = keras.optimizers.SGD(lr=learningRate, momentum=0.9, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Adagrad':
        opti = keras.optimizers.Adagrad(lr=learningRate)
    elif optimizer == 'Adadelta':
        opti = keras.optimizers.Adadelta(lr=learningRate)
    elif optimizer == 'Adam':
        opti = keras.optimizers.Adam(lr=learningRate, decay=5e-5)
        loss = 'categorical_crossentropy'
    elif optimizer == 'Nadam':
        opti = keras.optimizers.Nadam(lr=learningRate)
        loss = 'categorical_crossentropy'
    elif optimizer == 'RMSprop':
        opti = keras.optimizers.RMSprop(lr=learningRate)
    return opti, loss 
Example 11
Project: hpac   Author: aghie   File: models.py    MIT License
def __init__(self, conf, forms, labels, options):

        self.name_classifier = "HP_MLR"
        self.iforms = {w: self.INIT_REAL_INDEX + i for i, w in enumerate(sorted(forms))}
        self.conf = conf
        self.ilabels = {l: i for i, l in enumerate(sorted(labels))}
        
        self.labelsi = {self.ilabels[l]: l for l in self.ilabels}
        
        self.n_labels = len(self.ilabels)
        
        model = Sequential() 
        model.add(Dense(self.n_labels, input_dim=len(self.iforms)+len(self.SPECIAL_INDEXES), activation='softmax')) 
        
        model.compile(loss='categorical_crossentropy',
                      optimizer="adam",  # alternatively: keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False), or 'sgd'
                      metrics=['accuracy'])
    
        self.model = model
        
        self.options = options 
Example 12
Project: deep-nn-car   Author: scope-lab-vu   File: train.py    MIT License
def trainModel(model, X, A, Y):
    adam = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss='mse', optimizer=adam)
    # checkpoint
    filePath = "weights.best.hdf5"
    checkpoint = ModelCheckpoint(filePath, monitor='loss', verbose=1, save_best_only=True, mode='min')
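    # 'history' is assumed to be a callback (e.g., a custom loss logger) defined elsewhere in the script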
    callbacks_list = [checkpoint, history]
    model.fit([X, A], Y, epochs=200, batch_size=64, callbacks=callbacks_list, verbose=2) 
Example 13
Project: DeepCCS   Author: plpla   File: DeepCCS.py    GNU General Public License v3.0
def create_model(self):
        """
            Builds a neural net using a set of arguments
            """
        if len(self.smiles_encoder.converter) == 0 or len(self.adduct_encoder.converter) ==  0:
            raise ValueError("Encoders must be fit before creating a model.")
        smile_input_layer = Input(shape=(250, len(self.smiles_encoder.converter)), name="smile")
        conv = Conv1D(64, kernel_size=4, activation='relu', kernel_initializer='normal')(smile_input_layer)

        previous = conv
        for i in range(6):
            conv = Conv1D(64, kernel_size=4, activation='relu', kernel_initializer='normal')(previous)
            if i == 5:
                pool = MaxPooling1D(pool_size=2, strides=2)(conv)
            else:
                pool = MaxPooling1D(pool_size=2, strides=1)(conv)
            previous = pool

        flat = Flatten()(previous)
        adduct_input_layer = Input(shape=(len(self.adduct_encoder.converter),), name="adduct")
        remix_layer = keras.layers.concatenate([flat, adduct_input_layer], axis=-1)

        previous = remix_layer
        for i in range(2):
            dense_layer = Dense(384, activation="relu", kernel_initializer='normal')(previous)
            previous = dense_layer

        output = Dense(1, activation="linear")(previous)

        opt = getattr(keras.optimizers, 'adam')  # relies on the legacy lowercase alias for Adam in Keras 2.x
        opt = opt(lr=0.0001)
        model = Model(inputs=[smile_input_layer, adduct_input_layer], outputs=output)
        model.compile(optimizer=opt, loss='mean_squared_error')

        self.model = model 
Example 14
Project: face_landmark_dnn   Author: junhwanjang   File: train_basic_models.py    MIT License
def main():
        # Define X and y: load the image and landmark arrays
        PATH = "./data/64_64_1/offset_1.3/"
        X = np.load(PATH + "basic_dataset_img.npz")
        y = np.load(PATH + "basic_dataset_pts.npz")
        X = X['arr_0']
        y = y['arr_0'].reshape(-1, 136)

        print("Define X and Y")
        print("=======================================")
        
        # Split train / test dataset
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        print("Success of getting train / test dataset")
        print("=======================================")
        print("X_train: ", X_train.shape)
        print("y_train: ", y_train.shape)
        print("X_test: ", X_test.shape)
        print("y_test: ", y_test.shape)
        print("=======================================")

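        # 'model' is assumed to be defined earlier in the script (the landmark network under training)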
        model.compile(loss=smoothL1, optimizer=keras.optimizers.Adam(lr=1e-3), metrics=['mape'])
        print(model.summary())
        # checkpoint
        filepath="./basic_checkpoints/smooth_L1-{epoch:02d}-{val_mean_absolute_percentage_error:.5f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
        callbacks_list = [checkpoint]
        history = model.fit(X_train, y_train, batch_size=64, epochs=10000, shuffle=True,\
                            verbose=1, validation_data=(X_test, y_test), callbacks=callbacks_list)

        # Save model
        model.save("./model/face_landmark_dnn.h5")
        print("=======================================")
        print("Save Final Model")
        print("=======================================") 
Example 15
Project: sisy   Author: qorrect   File: build.py    Apache License 2.0
def _build_optimizer(training):
    optimizer = getattr(keras.optimizers, training.optimizer.optimizer)
    return optimizer(**training.optimizer.parameters) 
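
For context, a hypothetical training spec with the attributes _build_optimizer reads (names inferred from the function body):

# Hypothetical objects matching the attributes _build_optimizer expects.
class _OptimizerSpec:
    optimizer = 'Adam'         # class name looked up on keras.optimizers
    parameters = {'lr': 1e-3}  # keyword arguments for that class

class _TrainingSpec:
    optimizer = _OptimizerSpec()

opt = _build_optimizer(_TrainingSpec())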
Example 16
Project: laughter-detection   Author: jrgillick   File: train_model.py    MIT License
def initialize_model():
    model = Sequential()
    model.add(Dense(600, use_bias=True, input_dim=2886))  # 1924
    model.add(keras.layers.BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Activation("relu"))
    model.add(Dense(100, use_bias=True))  # input_dim is ignored on non-input layers, so it is dropped here
    model.add(keras.layers.BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Activation("relu"))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    optimizer = keras.optimizers.Adam()
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return model 
Example 17
Project: thyroid_segmentation   Author: suryatejadev   File: train.py    MIT License
def select_optimizer(optimizer_name, optimizer_args):
    optimizers = {
        'sgd': SGD,
        'rmsprop': RMSprop,
        'adagrad': Adagrad,
        'adadelta': Adadelta,
        'adam': Adam,
        'adamax': Adamax,
        'nadam': Nadam,
    }
    if optimizer_name not in optimizers:
        raise Exception("Unknown optimizer ({}).".format(optimizer_name))

    return optimizers[optimizer_name](**optimizer_args) 
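
A short usage sketch for select_optimizer; the argument dict is passed straight to the optimizer constructor (values are illustrative).

opt = select_optimizer('adam', {'lr': 1e-4})
model.compile(loss='binary_crossentropy', optimizer=opt)  # 'model' is assumed to be built elsewhere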
Example 18
Project: spec-img-finesse   Author: kilinco   File: final.py    MIT License
def reset_model():
    model = getModel(X_train[0].shape, numComponents*3)
    prunable_model = convert_to_masked_model(model)
    prunable_model.load_weights(weights_path)
    opt = keras.optimizers.Adam(lr=lr, decay=1e-6)
    prunable_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return prunable_model 
Example 19
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0
def fit_lstm(self, X_train, y_train, X_test=None, y_test=None):
        self.max_features = 2**12
        #print X_train.shape, y_train.shape
        N, D = X_train.shape
        model = Sequential()
        model.add(Embedding(self.max_features, D))
        #model.add(LSTM(D, dropout=0.2, recurrent_dropout=0.2))
        model.add(Bidirectional(CuDNNGRU(D, return_sequences=True)))

        #model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))
        model.add(Dropout(0.2))

        model.add(Bidirectional(CuDNNGRU(D)))
        #model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))
        model.add(Dropout(0.2))

        model.add(Dense(1, activation='sigmoid'))

        # try using different optimizers and different optimizer configs
        nb_classes = len(set(y_train))
        loss = 'categorical_crossentropy' if nb_classes > 2 else 'binary_crossentropy'
        #model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[self.metric])
        model.compile(loss=loss, optimizer='adam', metrics=[self.metric])
        print('Train..., loss is %s %s' % (loss, D))
        if X_test is not None and y_test is not None:
            model.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, validation_data=(X_test, y_test), shuffle=True, callbacks=self.checkpointer)
        else:
            # no explicit test set: hold out a fraction of the training data instead
            model.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, verbose=1, shuffle=True, validation_split=self.cross_val, callbacks=self.checkpointer)
        score, acc = model.evaluate(X_test, y_test, batch_size=self.batch_size)
        print('Test score:', score)
        print('Test accuracy:', acc)
        self.model_2d = model 
Example 20
Project: neuroseed-mvp   Author: Neuroseed   File: constructor.py    MIT License
def compile_model(model, config):
    # compile
    loss = config['loss']
    optimizer_name = config['optimizer']['name']
    optimizer_config = config['optimizer'].get('config', {})
    optimizer = getattr(optimizers, optimizer_name)(**optimizer_config)
    metrics = config.get('metrics', [])
    metrics.append('accuracy')

    # compile keras model
    model.compile(
        loss=loss,
        optimizer=optimizer,
        metrics=metrics) 
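
A hypothetical config dict illustrating the structure compile_model reads (keys inferred from the function body):

config = {
    'loss': 'categorical_crossentropy',
    'optimizer': {'name': 'Adam', 'config': {'lr': 1e-3}},
    'metrics': ['top_k_categorical_accuracy'],  # 'accuracy' is appended automatically
}
compile_model(model, config)  # 'model' is assumed to be built elsewhere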
Example 21
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
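
A hypothetical call, following the file-naming convention the docstring describes (paths are illustrative):

# 'mymodel_json.txt' and 'mymodel_weights.h5' are assumed to sit side by side;
# the result is written to /results/mymodel_pred.mat.
fPredict(X_val, y_val, '/models/mymodel_json.txt', '/results/', batchSize=64)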
Example 22
Project: CNNArt   Author: thomaskuestner   File: CNN3DmoreLayers.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 23
Project: CNNArt   Author: thomaskuestner   File: CNN3D.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 24
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 25
Project: CNNArt   Author: thomaskuestner   File: 3D_CNN.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input:
        X: Samples to predict on. The shape of X should fit to the input shape of the model
        y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
        sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
        sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                    The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
        batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 26
Project: CNNArt   Author: thomaskuestner   File: 2D_CNN.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    # takes the .mat file as a string

    sModelPath = sModelPath.replace(".mat", "")
    # sModelPath = sModelPath.replace("_json", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '.json'
    model_all = sModelPath + '_model.h5'

    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('score: ' + str(score_test) + '  acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)

    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print(modelSave)
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 27
Project: CNNArt   Author: thomaskuestner   File: 3D_VResFCN_Upsampling_small_single.py    Apache License 2.0
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
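
A hypothetical call for this segmentation variant (paths and names are illustrative); note that sModelPath is treated as a directory whose basename also names the .json and _weights.h5 files inside it.

predictions = fPredict(X_test, Y_segMasks_test=masks, sModelPath='/models/segnet', sOutPath='/results/')
print(predictions['score_test'], predictions['acc_test'])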
Example 28
Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_SE-DenseNet-BC.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})
Example 29
Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_ResNet.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})
Example 30
Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_SE-DenseNet.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})
Example 31
Project: CNNArt   Author: thomaskuestner   File: multiclass_SE-ResNet-50.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})
Example 32
Project: CNNArt   Author: thomaskuestner   File: multiclass_SE-ResNet-44_dense.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})
Example 33
Project: CNNArt   Author: thomaskuestner   File: multiclass_SE-DenseNet-34.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})
Example 34
Project: CNNArt   Author: thomaskuestner   File: multiclass_SE-DenseNet-BC-100.py    Apache License 2.0
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss:' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 39
Project: CNNArt   Author: thomaskuestner   File: multiclass_ResNet-50.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y.
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model.
            y: Labels for the samples. The number of labels should equal the number of samples in X.
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory.
            sOutPath: (String) full path for the output, a *.mat file in which the computed loss and accuracy are stored.
                        The output file path is 'sOutPath' plus the filename of sModelPath without '_json.txt', with the suffix '_pred.mat' appended.
            batchSize: Batch size, the number of samples that are processed at once."""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss:' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 40
Project: CNNArt   Author: thomaskuestner   File: multiclass_ResNet-56.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y.
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model.
            y: Labels for the samples. The number of labels should equal the number of samples in X.
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory.
            sOutPath: (String) full path for the output, a *.mat file in which the computed loss and accuracy are stored.
                        The output file path is 'sOutPath' plus the filename of sModelPath without '_json.txt', with the suffix '_pred.mat' appended.
            batchSize: Batch size, the number of samples that are processed at once."""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss:' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 41
Project: CNNArt   Author: thomaskuestner   File: motion_MNetArt.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y.
    Input:
        X: Samples to predict on. The shape of X should match the input shape of the model.
        y: Labels for the samples. The number of labels should equal the number of samples in X.
        sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory.
        sOutPath: (String) full path for the output, a *.mat file in which the computed loss and accuracy are stored.
                    The output file path is 'sOutPath' plus the filename of sModelPath without '_json.txt', with the suffix '_pred.mat' appended.
        batchSize: Batch size, the number of samples that are processed at once."""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss:' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 42
Project: CNNArt   Author: thomaskuestner   File: motion_CNN3D.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y.
    Input:
        X: Samples to predict on. The shape of X should match the input shape of the model.
        y: Labels for the samples. The number of labels should equal the number of samples in X.
        sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory.
        sOutPath: (String) full path for the output, a *.mat file in which the computed loss and accuracy are stored.
                    The output file path is 'sOutPath' plus the filename of sModelPath without '_json.txt', with the suffix '_pred.mat' appended.
        batchSize: Batch size, the number of samples that are processed at once."""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss:' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 43
Project: CNNArt   Author: thomaskuestner   File: motion_all_CNN2D_multiscale.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, batchSize, X_test_p2=None, y_test_p2=None,  patchSize=[]):
    weight_name = sOutPath + '/' + model_name + '_weights.h5'
    model_json = sOutPath + '/' + model_name + '_json'
    model_all = sOutPath + '/' + model_name + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    # model = createModel(patchSize, patchSize_down=patchSize_down, ScaleFactor=ScaleFactor)
    # opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T
    X_test_p2 = np.expand_dims(X_test_p2, axis=1)
    y_test_p2 = np.asarray([y_test_p2[:], np.abs(np.asarray(y_test_p2[:], dtype=np.float32) - 1)]).T

    test_loss, p1_loss, p2_loss, p1_acc, p2_acc = model.evaluate([X_test, X_test_p2], [y_test, y_test_p2], batch_size=batchSize, verbose=1)
    print('p1_loss:' + str(p1_loss) + '   p1_acc:' + str(p1_acc) + '   p2_loss:' + str(p2_loss) + '   p2_acc:' + str(p2_acc))
    prob_pre = model.predict([X_test,X_test_p2], batch_size=batchSize, verbose=1)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = sOutPath + '/' + model_name + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'p1_loss': p1_loss, 'p1_acc': p1_acc, 'p2_loss': p2_loss, 'p2_acc': p2_acc})
    #model.save(model_all)

## helper functions 
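The label handling above converts a binary label vector into a two-column one-hot matrix (class, complement). A worked miniature with made-up labels:

import numpy as np

y = [1, 0, 1]
one_hot = np.asarray([y[:], np.abs(np.asarray(y[:], dtype=np.float32) - 1)]).T
print(one_hot)  # rows: [1. 0.], [0. 1.], [1. 0.]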
Example 44
Project: CNNArt   Author: thomaskuestner   File: motion_vgg_CNN2D.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    # weight_name = sOutPath + '/' + model_name + '_weights.h5'
    # model_json = sOutPath + model_name + '_json'
    model_all = sOutPath + model_name + '_model.h5'

    # model = createModel(patchSize)
    # opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
    #
    # model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    # model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 1)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = sOutPath + '/' + model_name + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 45
Project: CNNArt   Author: thomaskuestner   File: motion_VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, batchSize=64, patchSize=[40, 40, 5]):
    """Takes an already trained model and computes the loss and accuracy over the samples X_test with their labels y_test.
    Input:
        X_test: Samples to predict on. The shape of X_test should match the input shape of the model.
        y_test: Labels for the samples. The number of labels should equal the number of samples in X_test.
        model_name: (String) name of the trained Keras model. The files 'model_name'_json.txt and 'model_name'_weights.h5 have to exist in sOutPath.
        sOutPath: (String) directory containing the model files; also receives the output, a *.mat file in which the computed loss and accuracy are stored
                    under the path 'sOutPath' + '/' + 'model_name' + '_pred.mat'.
        batchSize: Batch size, the number of samples that are processed at once."""
    weight_name = sOutPath + '/' + model_name + '_weights.h5'
    model_json = sOutPath + '/' + model_name + '_json.txt'
    model_all = sOutPath + '/' + model_name + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    print('loss:' + str(score_test) + '   acc:' + str(acc_test))
    prob_pre = model.predict(X_test, batch_size=batchSize, verbose=1)

    modelSave = sOutPath + '/' + model_name + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
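These prediction helpers assume the training side saved the architecture as JSON text and the weights as a separate HDF5 file. A minimal sketch of that save convention, assuming it mirrors the '_json.txt'/'_weights.h5' names used above (fSaveModel is an illustrative name, not from the project):

def fSaveModel(model, sOutPath, model_name):
    # architecture only, as JSON text
    with open(sOutPath + '/' + model_name + '_json.txt', 'w') as fp:
        fp.write(model.to_json())
    # weights only, as HDF5
    model.save_weights(sOutPath + '/' + model_name + '_weights.h5')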
Example 46
Project: cnn_fire   Author: aleixo   File: cnn_architectures.py    GNU General Public License v3.0 5 votes vote down vote up
def smallCustomArch(numChannels, imgRows, imgCols, numClasses):

        model = Sequential()
        model.add(Convolution2D(32, (3, 3), input_shape=(numChannels, imgRows, imgCols)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())  
        model.add(Dense(64))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(numClasses))
        model.add(Activation('softmax'))

        adadelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)

        # use the Adadelta optimizer configured above instead of leaving it unused
        model.compile(loss='categorical_crossentropy', optimizer=adadelta, metrics=['accuracy'])

        return model 
Example 47
Project: cnn_fire   Author: aleixo   File: cnn_architectures.py    GNU General Public License v3.0 5 votes vote down vote up
def miniVGGNet(numChannels, imgRows, imgCols, numClasses):

        model = Sequential()
        model.add(Convolution2D(32, 3, 3, border_mode="same",input_shape=(numChannels, imgRows, imgCols)))
        model.add(Activation("relu"))
        model.add(Convolution2D(32, 3, 3))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))  
    
        model.add(Convolution2D(64, 3, 3, border_mode="same"))
        model.add(Activation("relu"))
        model.add(Convolution2D(64, 3, 3))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
    
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation("relu"))
        model.add(Dropout(0.5))
        model.add(Dense(numClasses))
        
        model.add(Activation("softmax"))

        adadelta = keras.optimizers.Adadelta(lr=0.1, rho=0.95, epsilon=1e-08, decay=0.0)
        # use the Adadelta optimizer configured above instead of leaving it unused
        model.compile(loss='categorical_crossentropy', optimizer=adadelta, metrics=['accuracy'])
        
        return model 
Example 48
Project: DrivenData-Identify-Fish-Challenge-2nd-Place-Solution   Author: ZFTurbo   File: a02_zoo.py    GNU General Public License v3.0 5 votes vote down vote up
def get_optim(cnn_type, optim_type, learning_rate=-1):
    from keras.optimizers import SGD
    from keras.optimizers import Adam

    if learning_rate == -1:
        lr = get_learning_rate(cnn_type)
    else:
        lr = learning_rate
    if optim_type == 'Adam':
        optim = Adam(lr=lr)
    else:
        optim = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    return optim 
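Note that get_optim only consults cnn_type when learning_rate is left at its -1 sentinel, and that any optim_type other than 'Adam' falls through to the SGD branch. A hypothetical call with an explicit rate:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(10, activation='softmax', input_shape=(100,))])
model.compile(optimizer=get_optim('ResNet50', 'Adam', learning_rate=1e-4),
              loss='categorical_crossentropy', metrics=['accuracy'])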
Example 49
Project: hpac   Author: aghie   File: models.py    MIT License 5 votes vote down vote up
def __init__(self, conf, vocab, labels, options):

        self.name_classifier = "HP_MLP"
        self.conf = conf
        self.ilabels = {l: i for i, l in enumerate(sorted(labels))}
        self.n_labels = len(self.ilabels)
        self.labelsi = {self.ilabels[l]: l for l in self.ilabels}
        self.iforms = {w: self.INIT_REAL_INDEX + i for i, w in enumerate(sorted(vocab))}
        input_iw = Input(shape=(len(self.iforms)+len(self.SPECIAL_INDEXES),), name="input", dtype='float32')
        
        x = input_iw
        for l in range(0,int(self.conf[LAYERS])):
            x = Dense(int(self.conf[NEURONS]))(x)
            x = Dropout(float(self.conf[DROPOUT]))(x)          
            x = Activation('relu')(x)
    
        x = Dense(self.n_labels)(x) 
        output = Activation('softmax', name='output')(x)
        
        model = Model(inputs = [input_iw], outputs = [output])
              
        model.compile(loss="categorical_crossentropy",
                    optimizer="adam",#keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=1e-6, nesterov=False),
                    metrics=['accuracy'])           
        
        self.model = model

        self.options = options 
Example 50
Project: Sound_event_detection   Author: Kikyo-16   File: metricCallback.py    MIT License 5 votes vote down vote up
def get_opt(self, lr):
		"""
		Optimizer with specified learning rate.
		Args:
			lr: float
				learning rate
		Return:
			opt: keras.optimizers
				Adam optimizer
		"""
		opt = keras.optimizers.Adam(lr=lr, beta_1=0.9,
			beta_2=0.999, epsilon=1e-8, decay=1e-8)
		return opt 
Example 51
Project: MUSE-EEG-DeepLearning   Author: MikeMpapa   File: train_deepModel.py    MIT License 5 votes vote down vote up
def Inception( im_h, im_w,num_classes, batch_size,epochs, lr=0.01, decay=1e-6, momentum=0.9, nesterov=True):
    base_model = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet',input_shape=(im_h, im_w,3))
    #top_model = Sequential()
     # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='tanh')(x)
    # and a logistic layer with one output unit per class
    predictions = Dense(num_classes, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in model.layers:
        layer.trainable = False
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=nesterov)
    rms = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)  # alternative optimizer, unused
    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer=sgd, loss='binary_crossentropy',metrics=['accuracy'])


    #top_model.add(Flatten(input_shape=model.output_shape[1:]))
    #top_model.add(Dense(256, activation='relu'))
    #top_model.add(Dropout(0.5))
    #top_model.add(Dense(1, activation='sigmoid'))
    #model.add(top_model)
    #model.compile(loss='binary_crossentropy',
     #         optimizer=SGD(lr=1e-4, momentum=0.9),
    #metrics=['accuracy'])
    return  model 
Example 52
Project: cactus-maml   Author: kylehkhsu   File: baselines.py    MIT License 4 votes vote down vote up
def embedding_mlp(num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
                  num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set, dataset=FLAGS.dataset,
                  units=FLAGS.units, dropout=FLAGS.dropout):
    import keras
    from keras.layers import Dense, Dropout
    from keras.losses import categorical_crossentropy
    from keras.callbacks import EarlyStopping
    from keras import backend as K

    if dataset != 'celeba':
        _, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
        partition = task_generator.get_partition_from_labels(Y_test)
        partitions = [partition]
    else:
        _, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
        partitions = task_generator.get_celeba_task_pool(attributes_test)
    tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)

    train_accuracies, test_accuracies = [], []

    start = time.time()
    for i_task, task in enumerate(tqdm(tasks)):
        if (i_task + 1) % (num_tasks // 10) == 0:
            tqdm.write('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(test_accuracies)))
        ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
        Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
        Y_train_few, Y_test_few = keras.utils.to_categorical(Y_train_few, num_classes=num_classes), keras.utils.to_categorical(Y_test_few, num_classes=num_classes)

        model = keras.Sequential()
        model.add(Dense(units=units, activation='relu', input_dim=Z_train_few.shape[1]))
        model.add(Dropout(rate=dropout))
        model.add(Dense(units=num_classes, activation='softmax'))
        model.compile(loss=categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        early_stopping = EarlyStopping(monitor='val_loss', patience=2)
        model.fit(Z_train_few, Y_train_few, batch_size=Z_train_few.shape[0], epochs=500, verbose=0, validation_data=(Z_test_few, Y_test_few), callbacks=[early_stopping])
        train_score = model.evaluate(Z_train_few, Y_train_few, verbose=0)
        train_accuracies.append(train_score[1])
        test_score = model.evaluate(Z_test_few, Y_test_few, verbose=0)
        test_accuracies.append(test_score[1])
        K.clear_session()

    print('units={}, dropout={}'.format(units, dropout))
    print('{}-way {}-shot embedding mlp: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
    print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
    print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start)) 
Example 53
Project: DeepFashion   Author: abhishekrana   File: train_multi_v2.py    Apache License 2.0 4 votes vote down vote up
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):

    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)

    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)

    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)

    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    # logging.debug('lr {}'.format(lr))
    # logging.debug('momentum {}'.format(momentum))
    # logging.debug('decay {}'.format(decay))
    # logging.debug('optimizer_mod {}'.format(optimizer_mod))

    return optimizer_mod, lr 
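Because get_optimizer (this and the next example) returns both the optimizer object and the learning rate that was actually applied, callers can log the default that was filled in. A hypothetical call:

optimizer_mod, lr = get_optimizer('RMSprop', lr=None, decay=1e-6)
print('selected lr: {}'.format(lr))  # 0.001, the RMSprop default filled in above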
Example 54
Project: DeepFashion   Author: abhishekrana   File: train_multi.py    Apache License 2.0 4 votes vote down vote up
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):

    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)

    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)

    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)

    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    # logging.debug('lr {}'.format(lr))
    # logging.debug('momentum {}'.format(momentum))
    # logging.debug('decay {}'.format(decay))
    # logging.debug('optimizer_mod {}'.format(optimizer_mod))

    return optimizer_mod, lr 
Example 55
Project: kaggle_dsb   Author: syagev   File: classifier.py    Apache License 2.0 4 votes vote down vote up
def train_ensemble(trainset, valset, path_data, path_session, hyper_param):
    """Train an ensemble of models per set of hyper param.

    Args:
        trainset, valset: training and validation sets from `split_train_val()`
        path_data: /path/to/train_detections.hdf5
        path_session: string specifying the session's output path
        hyper_param: dictionary with entries as follows -
                        * epochs: number of epochs
                        * batch_sz: batch size in training
                        * batch_norm: do batch normalization?
                        * optimizer: a keras.optimizers instance
                        * lr_scheduler: a keras.callback.LearningRateScheduler

    """

    models = []
    for i, batch_sz in enumerate(hyper_param["batch_sz"]):
        for j, optimizer in enumerate(hyper_param["optimizers"]):
            for k, lr_param in enumerate(hyper_param["lr_scheduler_param"]):
                for l, dropout_rate in enumerate(hyper_param["dropout_rate"]):
                    for m, batch_norm in enumerate(hyper_param["batch_norm"]):
                        for n, pool_type in enumerate(hyper_param["pool_type"]):

                            # prepare the tasks' hyper param
                            hyper_param_ = {
                                "epochs": hyper_param["epochs"],
                                "batch_sz": batch_sz,
                                "optimizer": optimizer,
                                "lr_schedule": make_lr_scheduler(*lr_param),
                                "dropout_rate": dropout_rate,
                                "batch_norm": batch_norm,
                                "pool_type": pool_type
                                }

                            # task's path
                            session_id_ = "{}.{}_{}_{}_{}_{}_{}". \
                            format(os.path.basename(path_session),
                                   i, j, k, l, m, n)
                            path_session_ = os.path.join(path_session,
                                                         session_id_)
                            if not os.path.exists(path_session_):
                                os.mkdir(path_session_)

                            # train
                            models.append(train(
                                trainset,
                                valset,
                                path_data,
                                path_session_,
                                hyper_param_))
   
    # sort by validation loss (list.sort() is in-place and returns None)
    models.sort(key=lambda tup: tup[1])
    return models 
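train_ensemble relies on a make_lr_scheduler helper that is not shown in this example. A plausible sketch of such a factory, assuming a step-decay parameterisation (initial rate, drop factor, epochs per step); the real signature may differ:

from keras.callbacks import LearningRateScheduler

def make_lr_scheduler(lr0, drop, step):
    # step decay: lr = lr0 * drop ** floor(epoch / step)
    return LearningRateScheduler(lambda epoch: lr0 * (drop ** (epoch // step)))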
Example 56
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    MIT License 4 votes vote down vote up
def train_model(self,file_list,labels,n_fold=5,batch_size=16,epochs=40,dim=224,lr=1e-5,model='ResNet50'):
		model_save_dest = {}
		k = 0
		kf = KFold(n_splits=n_fold, random_state=0, shuffle=True)

		for train_index,test_index in kf.split(file_list):


			k += 1
			file_list = np.array(file_list)
			labels   = np.array(labels)
			train_files,train_labels  = file_list[train_index],labels[train_index]
			val_files,val_labels  = file_list[test_index],labels[test_index]
			
			if model == 'ResNet50':
				model_final = self.resnet_pseudo(dim=224,freeze_layers=10,full_freeze='N')
			
			if model == 'VGG16':
				model_final = self.VGG16_pseudo(dim=224,freeze_layers=10,full_freeze='N') 
			
			if model == 'InceptionV3':
				model_final = self.inception_pseudo(dim=224,freeze_layers=10,full_freeze='N')
				
			adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
			model_final.compile(optimizer=adam, loss=["mse"],metrics=['mse'])
			reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.50,patience=3, min_lr=0.000001)
			early = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
			logger = CSVLogger('keras-5fold-run-01-v1-epochs_ib.log', separator=',', append=False)
			checkpoint = ModelCheckpoint(
								'kera1-5fold-run-01-v1-fold-' + str('%02d' % (k + 1)) + '-run-' + str('%02d' % (1 + 1)) + '.check',
								monitor='val_loss', mode='min',
								save_best_only=True,
								verbose=1) 
			callbacks = [reduce_lr,early,checkpoint,logger]
			train_gen = DataGenerator(train_files,train_labels,batch_size=32,n_classes=len(self.class_folders),dim=(self.dim,self.dim,3),shuffle=True)
			val_gen = DataGenerator(val_files,val_labels,batch_size=32,n_classes=len(self.class_folders),dim=(self.dim,self.dim,3),shuffle=True)
			model_final.fit_generator(train_gen,epochs=epochs,verbose=1,validation_data=(val_gen),callbacks=callbacks)
			model_name = 'kera1-5fold-run-01-v1-fold-' + str('%02d' % (k + 1)) + '-run-' + str('%02d' % (1 + 1)) + '.check'
			del model_final
			f = h5py.File(model_name, 'r+')
			del f['optimizer_weights']
			f.close()
			model_final = keras.models.load_model(model_name)
			model_name1 = self.outdir + str(model) + '___' + str(k) 
			model_final.save(model_name1)
			model_save_dest[k] = model_name1
				
		return model_save_dest

	# Hold out dataset validation function 
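Deleting f['optimizer_weights'] before reloading, as done above, is a common workaround for checkpoints whose optimizer state cannot be deserialized across Keras versions. The same trick in isolation; the filename is illustrative:

import h5py
import keras

path = 'kera1-5fold-run-01-v1-fold-02-run-02.check'
with h5py.File(path, 'r+') as f:
    if 'optimizer_weights' in f:
        del f['optimizer_weights']  # drop optimizer state, keep architecture and weights
model = keras.models.load_model(path)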
Example 57
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    MIT License 4 votes vote down vote up
def train_model(self,train_dir,val_dir,n_fold=5,batch_size=16,epochs=40,dim=224,lr=1e-5,model='ResNet50'):
        if model == 'ResNet50':
            model_final = self.resnet_pseudo(dim=224,freeze_layers=10,full_freeze='N')
        if model == 'VGG16':
            model_final = self.VGG16_pseudo(dim=224,freeze_layers=10,full_freeze='N') 
        if model == 'InceptionV3':
            model_final = self.inception_pseudo(dim=224,freeze_layers=10,full_freeze='N')
            
        train_file_names = glob.glob(f'{train_dir}/*/*')
        val_file_names = glob.glob(f'{val_dir}/*/*')
        train_steps_per_epoch = len(train_file_names)/float(batch_size)
        val_steps_per_epoch = len(val_file_names)/float(batch_size)
        train_datagen = ImageDataGenerator(horizontal_flip = True,vertical_flip = True,width_shift_range = 0.1,height_shift_range = 0.1,
                channel_shift_range=0,zoom_range = 0.2,rotation_range = 20,preprocessing_function=pre_process)
        val_datagen = ImageDataGenerator(preprocessing_function=pre_process)
        train_generator = train_datagen.flow_from_directory(train_dir,
        target_size=(dim,dim),
        batch_size=batch_size,
        class_mode='categorical')
        val_generator = val_datagen.flow_from_directory(val_dir,
        target_size=(dim,dim),
        batch_size=batch_size,
        class_mode='categorical')
        print(train_generator.class_indices)
        joblib.dump(train_generator.class_indices,f'{self.outdir}/class_indices.pkl')
        adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model_final.compile(optimizer=adam, loss=["categorical_crossentropy"],metrics=['accuracy'])
        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.50,patience=3, min_lr=0.000001)
        early = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
        logger = CSVLogger(f'{self.outdir}/keras-epochs_ib.log', separator=',', append=False)
        model_name = f'{self.outdir}/keras_transfer_learning-run.check'
        checkpoint = ModelCheckpoint(
                model_name,
                monitor='val_loss', mode='min',
                save_best_only=True,
                verbose=1) 
        callbacks = [reduce_lr,early,checkpoint,logger]
        model_final.fit_generator(train_generator,steps_per_epoch=train_steps_per_epoch,epochs=epochs,verbose=1,validation_data=(val_generator),validation_steps=val_steps_per_epoch,callbacks=callbacks,
                                                                                                                  class_weight={0:0.012,1:0.12,2:0.058,3:0.36,4:0.43})
        #model_final.fit_generator(train_generator,steps_per_epoch=1,epochs=epochs,verbose=1,validation_data=(val_generator),validation_steps=1,callbacks=callbacks)
        
        del model_final
        f = h5py.File(model_name, 'r+')
        del f['optimizer_weights']
        f.close()
        model_final = keras.models.load_model(model_name)
        model_to_store_path = f'{self.outdir}/{model}' 
        model_final.save(model_to_store_path)
        return model_to_store_path,train_generator.class_indices

# Hold out dataset validation function 
Example 58
Project: training_results_v0.6   Author: mlperf   File: __init__.py    Apache License 2.0 4 votes vote down vote up
def load_model(filepath, custom_optimizers=None, custom_objects=None):
    """
    Loads a saved Keras model with a Horovod DistributedOptimizer.

    The DistributedOptimizer will wrap the underlying optimizer used to train
    the saved model, so that the optimizer state (params and weights) will
    be picked up for retraining.

    By default, all optimizers in the module `keras.optimizers` will be loaded
    and wrapped without needing to specify any `custom_optimizers` or
    `custom_objects`.

    # Arguments
        filepath: One of the following:
            - string, path to the saved model, or
            - h5py.File object from which to load the model
        custom_optimizers: Optional list of Optimizer subclasses to support
            during loading.
        custom_objects: Optional dictionary mapping names (strings) to custom
            classes or functions to be considered during deserialization.

    # Returns
        A Keras model instance.

    # Raises
        ImportError: If h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    def wrap_optimizer(cls):
        return lambda **kwargs: DistributedOptimizer(cls(**kwargs))

    horovod_objects = {
        subclass.__name__.lower(): wrap_optimizer(subclass)
        for subclass in keras.optimizers.Optimizer.__subclasses__()
        if subclass.__module__ == 'keras.optimizers'
    }

    if custom_optimizers is not None:
        horovod_objects.update({
            cls.__name__: wrap_optimizer(cls)
            for cls in custom_optimizers
        })

    if custom_objects is not None:
        horovod_objects.update(custom_objects)

    return keras.models.load_model(filepath, custom_objects=horovod_objects) 
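A sketch of restoring such a checkpoint for continued training; the filename is a placeholder and the data pipeline is assumed to exist:

# hvd.init() and the training data are assumed to be set up elsewhere.
model = load_model('checkpoint-42.h5')  # the optimizer comes back wrapped in DistributedOptimizer
model.fit(x_train, y_train, epochs=1, verbose=1)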
Example 59
Project: CodeNeuron   Author: vmarkovtsev   File: train_model.py    MIT License 4 votes vote down vote up
def create_char_rnn_model(args: argparse.Namespace, classes: int,
                          weights: Optional[List[numpy.ndarray]] = None):
    # this late import prevents from loading Tensorflow too soon
    import tensorflow as tf
    tf.set_random_seed(args.seed)
    from keras import layers, models, initializers, optimizers, metrics
    log = logging.getLogger("model")
    if args.devices:
        dev1, dev2 = ("/gpu:" + dev for dev in args.devices.split(","))
    else:
        dev1 = dev2 = "/cpu:0"

    def add_rnn(device):
        with tf.device(device):
            input = layers.Input(batch_shape=(args.batch_size, args.length), dtype="uint8")
            log.info("Added %s", input)
            embedding = layers.Embedding(
                200, 200, embeddings_initializer=initializers.Identity(), trainable=False)(input)
            log.info("Added %s", embedding)
        layer = embedding
        layer_sizes = [int(n) for n in args.layers.split(",")]
        for i, nn in enumerate(layer_sizes):
            with tf.device(device):
                layer_type = getattr(layers, args.type)
                ret_seqs = (i < len(layer_sizes) - 1)
                try:
                    layer = layer_type(nn, return_sequences=ret_seqs, implementation=2)(layer)
                except TypeError:
                    # implementation kwarg is not present in CuDNN layers
                    layer = layer_type(nn, return_sequences=ret_seqs)(layer)
                log.info("Added %s", layer)
            if args.dropout > 0:
                layer = layers.Dropout(args.dropout)(layer)
                log.info("Added %s", layer)
        return input, layer

    forward_input, forward_output = add_rnn(dev1)
    reverse_input, reverse_output = add_rnn(dev2)
    with tf.device(dev1):
        merged = layers.Concatenate()([forward_output, reverse_output])
        log.info("Added %s", merged)
        dense = layers.Dense(classes, activation="softmax")
        decision = dense(merged)
        log.info("Added %s", decision)
    optimizer = getattr(optimizers, args.optimizer)(lr=args.lr, decay=args.decay)
    log.info("Added %s", optimizer)
    model = models.Model(inputs=[forward_input, reverse_input], outputs=[decision])
    log.info("Compiling...")
    model.compile(optimizer=optimizer, loss="categorical_crossentropy",
                  metrics=[metrics.categorical_accuracy, metrics.top_k_categorical_accuracy])
    if weights:
        log.info("Setting weights...")
        dense_weights = dense.get_weights()
        weights[-len(dense_weights):] = dense_weights
        model.set_weights(weights)
    log.info("Done")
    return model 
Example 60
Project: cascor   Author: lxc-xx   File: mlp.py    GNU General Public License v2.0 4 votes vote down vote up
def main():

    with open('./two_spiral.pkl','rb') as f:
        data = pkl.load(f)

    batch_size = 100
    nb_classes = 2
    nb_epoch = 1000

    X_train = data['X']
    Y_train = data['Y']

    Xmax = np.max(np.abs(X_train))

    #X_train = X_train/Xmax
    Y_train = np_utils.to_categorical(Y_train, nb_classes)

    model = Sequential()
    model.add(Dense(10, input_shape=(2,)))
    #model.add(Activation('sigmoid'))
    #model.add(Dropout(0.2))
    model.add(Dense(10))
    model.add(Activation('sigmoid'))
    #model.add(Dropout(0.2))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    #opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    opt = RMSprop()
    #opt = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-6)
    #opt = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-6)
    #opt = keras.optimizers.SGD(lr=0.01, momentum=0., decay=0., nesterov=False)

    model.compile(loss='categorical_crossentropy', optimizer=opt)
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_data=(X_train, Y_train))
    #score = model.evaluate(X_train, Y_train, show_accuracy=True, verbose=0)
    Y_pred = model.predict(X_train)
    print(Y_pred)
    print(Y_train - Y_pred)


    return 
Example 61
Project: intelligent-annotation   Author: shibing624   File: allconv.py    Apache License 2.0 4 votes vote down vote up
def build_model(self, X):
        # assumes that data axis order is same as the backend
        input_shape = X.shape[1:]
        np.random.seed(self.random_state)
        tf.set_random_seed(self.random_state)

        model = Sequential()
        model.add(Conv2D(96, (3, 3), padding='same',
                         input_shape=input_shape, name='conv1'))
        model.add(Activation('relu'))
        model.add(Conv2D(96, (3, 3), name='conv2', padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(96, (3, 3), strides=(2, 2), padding='same', name='conv3'))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))

        model.add(Conv2D(192, (3, 3), name='conv4', padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(192, (3, 3), name='conv5', padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(192, (3, 3), strides=(2, 2), name='conv6', padding='same'))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))

        model.add(Conv2D(192, (3, 3), name='conv7', padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(192, (1, 1), name='conv8', padding='valid'))
        model.add(Activation('relu'))
        model.add(Conv2D(10, (1, 1), name='conv9', padding='valid'))

        model.add(GlobalAveragePooling2D())
        model.add(Activation('softmax', name='activation_top'))
        model.summary()

        try:
            optimizer = getattr(keras.optimizers, self.solver)
        except AttributeError:
            raise NotImplementedError('optimizer not implemented in keras')
        # All optimizers except Nadam take decay as a named arg
        try:
            opt = optimizer(lr=self.learning_rate, decay=self.lr_decay)
        except TypeError:
            # Nadam expects schedule_decay instead of decay
            opt = optimizer(lr=self.learning_rate, schedule_decay=self.lr_decay)

        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        # Save initial weights so that model can be retrained with same
        # initialization
        self.initial_weights = copy.deepcopy(model.get_weights())

        self.model = model 
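The getattr lookup above (also used in the next example) means self.solver must name an optimizer class exactly as exported by keras.optimizers. The resolution step in isolation, with 'Adam' as an illustrative solver string:

import keras

solver = 'Adam'  # e.g. 'SGD', 'RMSprop', 'Adadelta'
optimizer_cls = getattr(keras.optimizers, solver)
opt = optimizer_cls(lr=0.001, decay=1e-6)  # Nadam would need schedule_decay instead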
Example 62
Project: intelligent-annotation   Author: shibing624   File: small_cnn.py    Apache License 2.0 4 votes vote down vote up
def build_model(self, X):
        # assumes that data axis order is same as the backend
        input_shape = X.shape[1:]
        np.random.seed(self.random_state)
        tf.set_random_seed(self.random_state)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same',
                         input_shape=input_shape, name='conv1'))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3), name='conv2'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding='same', name='conv3'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3), name='conv4'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512, name='dense1'))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.n_classes, name='dense2'))
        model.add(Activation('softmax'))

        try:
            optimizer = getattr(keras.optimizers, self.solver)
        except AttributeError:
            raise NotImplementedError('optimizer not implemented in keras')
        # All optimizers except Nadam take decay as a named arg
        try:
            opt = optimizer(lr=self.learning_rate, decay=self.lr_decay)
        except TypeError:
            # Nadam expects schedule_decay instead of decay
            opt = optimizer(lr=self.learning_rate, schedule_decay=self.lr_decay)

        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        # Save initial weights so that model can be retrained with same
        # initialization
        self.initial_weights = copy.deepcopy(model.get_weights())

        self.model = model 
Example 63
Project: CNNArt   Author: thomaskuestner   File: motion_abd_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

    #    # load weights and model (OLD WAY)
    #    content = sio.loadmat(model_name)
    #    weig = content['wei']
    #    nSize = weig.shape
    #    weigh = []
    #
    #    for i in drange(0,nSize[1],2):
    #    	w0 = weig[0,i]
    #    	w1 = weig[0,i+1]
    #    	w1=w1.T
    #    	w1 = np.concatenate(w1,axis=0)
    #
    #    	weigh= weigh.extend([w0, w1])
    #
    #    model = model_from_json(model_json)
    #    model.set_weights(weigh)

    # load weights and model (new way)
    # model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti)
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    # model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize, show_accuracy=True)

    prob_pre = model.predict(X_test, batchSize, 0)

    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
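show_accuracy in the evaluate call above is an argument from old (pre-1.0) Keras. On a modern Keras the equivalent, reusing the names from this example, would roughly be:

model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)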
Example 64
Project: CNNArt   Author: thomaskuestner   File: 2D_CNN.py    Apache License 2.0 4 votes vote down vote up
def fCreateModel(patchSize, optimizer='Adam', learningRate=0.001):  # only on linse 3!!!!!!!!!!!!
    l1_reg = 0
    l2_reg = 1e-6
    cnn = Sequential()
    # Total params: 272,994
    cnn.add(Conv2D(32,
                   kernel_size=(14, 14),
                   kernel_initializer='he_normal',
                   weights=None,
                   padding='valid',
                   strides=(1, 1),
                   kernel_regularizer=l1_l2(l1_reg, l2_reg),
                   input_shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]))))
    # input shape: 1 means grayscale... pass it on correctly...
    cnn.add(Activation('relu'))

    cnn.add(Conv2D(64,  # learning rate: 0.1 -> 76%
                   kernel_size=(7, 7),
                   kernel_initializer='he_normal',
                   weights=None,
                   padding='valid',
                   strides=(1, 1),
                   kernel_regularizer=l1_l2(l1_reg, l2_reg),
                   # data_format='channels_first'
                   ))
    cnn.add(Activation('relu'))
    cnn.add(Conv2D(128,  # learning rate: 0.1 -> 76%
                   kernel_size=(3, 3),
                   kernel_initializer='he_normal',
                   weights=None,
                   padding='valid',
                   strides=(1, 1),
                   kernel_regularizer=l1_l2(l1_reg, l2_reg)))
    cnn.add(Activation('relu'))
    cnn.add(Flatten())
    cnn.add(Dense(units=2,
                  kernel_initializer='he_normal',
                  kernel_regularizer='l2'))
    cnn.add(Activation('softmax'))
    loss = 'categorical_crossentropy'
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # opti,loss=archi.fGetOptimizerAndLoss(optimizer=optimizer,learningRate=learningRate)
    cnn.compile(loss=loss, optimizer=opti, metrics=['accuracy'])
    print(cnn.summary())
    return cnn 
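The patchSize[0, 0] / patchSize[0, 1] indexing implies patchSize is a 1x2 array. A hypothetical call:

import numpy as np

patchSize = np.array([[48, 48]])  # 1x2, matching the indexing inside fCreateModel
cnn = fCreateModel(patchSize, learningRate=0.001)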
Example 65
Project: CNNArt   Author: thomaskuestner   File: motion_all_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

    #    # load weights and model (OLD WAY)
    #    content = sio.loadmat(model_name)
    #    weig = content['wei']
    #    nSize = weig.shape
    #    weigh = []
    #
    #    for i in drange(0,nSize[1],2):
    #    	w0 = weig[0,i]
    #    	w1 = weig[0,i+1]
    #    	w1=w1.T
    #    	w1 = np.concatenate(w1,axis=0)
    #
    #    	weigh= weigh.extend([w0, w1])
    #
    #    model = model_from_json(model_json)
    #    model.set_weights(weigh)

    # load weights and model (new way)
    # model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti)
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    # model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize, show_accuracy=True)
    prob_pre = model.predict(X_test, batchSize, 0)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 66
Project: CNNArt   Author: thomaskuestner   File: motion_head_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test,y_test,model_name, sOutPath, patchSize, batchSize):

    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

#    # load weights and model (OLD WAY)
#    content = sio.loadmat(model_name)
#    weig = content['wei']
#    nSize = weig.shape
#    weigh = []
#
#    for i in drange(0,nSize[1],2):
#    	w0 = weig[0,i]
#    	w1 = weig[0,i+1]
#    	w1=w1.T
#    	w1 = np.concatenate(w1,axis=0)
#
#    	weigh= weigh.extend([w0, w1])
#
#    model = model_from_json(model_json)
#    model.set_weights(weigh)

    # load weights and model (new way)
    #model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss',patience=10,verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti)
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    #model = load_model(model_all)

    # assume artifact affected shall be tested!
    #y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test,batch_size=batchSize,show_accuracy=True)
    prob_pre = model.predict(X_test, batchSize, 0)

    #modelSave = model_name[:-5] + '_pred.mat'
    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre':prob_pre, 'score_test': score_test, 'acc_test':acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 67
Project: CNNArt   Author: thomaskuestner   File: 3D_VResFCN_Upsampling_small.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    #sModelPath = sModelPath.replace("_json.txt", "")
    #weight_name = sModelPath + '_weights.h5'
    #model_json = sModelPath + '_json.txt'
    #model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
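The first few lines of this function expand a binary segmentation mask into a two-channel (background, foreground) one-hot encoding so it matches the network's softmax output. The same transformation in isolation, as a minimal NumPy sketch with made-up mask shapes:

import numpy as np

Y = np.random.randint(0, 2, (4, 16, 16, 16))      # hypothetical binary 3D masks
fg = np.expand_dims(Y, axis=-1)                   # foreground channel
bg = np.ones(fg.shape) - fg                       # background is the complement
Y_twochannel = np.concatenate((bg, fg), axis=-1)  # shape (4, 16, 16, 16, 2)
assert (Y_twochannel.sum(axis=-1) == 1).all()     # exactly one hot channel per voxel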
Example 68
Project: CNNArt   Author: thomaskuestner   File: 3D_VResFCN_Upsampling_final.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    # sModelPath = sModelPath.replace("_json.txt", "")
    # weight_name = sModelPath + '_weights.h5'
    # model_json = sModelPath + '_json.txt'
    # model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
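dice_coef and dice_coef_loss are compiled into the model here but defined elsewhere in the CNNArt project. As a sketch of what such helpers commonly look like (the project's exact formulation may differ, e.g. in smoothing or axis handling), a smoothed Dice coefficient on flattened tensors:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # 2 * |intersection| / (|y_true| + |y_pred|), smoothed against empty masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # minimizing the loss maximizes the Dice overlap
    return 1.0 - dice_coef(y_true, y_pred)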
Example 69
Project: CNNArt   Author: thomaskuestner   File: 3D_VResFCN_Upsampling_final_Motion_Shim_Multi_Label.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    #sModelPath = sModelPath.replace("_json.txt", "")
    #weight_name = sModelPath + '_weights.h5'
    #model_json = sModelPath + '_json.txt'
    #model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
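The "load weights and model (new way)" block recurs in every one of these segmentation examples: the architecture comes from a JSON string and the weights from a matching HDF5 file. Condensed into a standalone helper, assuming the same directory layout as in the examples above:

import os
from keras.models import model_from_json

def load_split_model(model_dir, base_name):
    # architecture from <base_name>.json, weights from <base_name>_weights.h5
    with open(os.path.join(model_dir, base_name + '.json'), 'r') as fp:
        model = model_from_json(fp.read())
    model.load_weights(os.path.join(model_dir, base_name + '_weights.h5'))
    return model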
Example 70
Project: CNNArt   Author: thomaskuestner   File: 3D_VResFCN_Upsampling_final_Motion_Binary_modified.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    # sModelPath = sModelPath.replace("_json.txt", "")
    # weight_name = sModelPath + '_weights.h5'
    # model_json = sModelPath + '_json.txt'
    # model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
Example 71
Project: CNNArt   Author: thomaskuestner   File: 3D_VResFCN_Upsampling.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    # sModelPath = sModelPath.replace("_json.txt", "")
    # weight_name = sModelPath + '_weights.h5'
    # model_json = sModelPath + '_json.txt'
    # model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
Example 72
Project: CNNArt   Author: thomaskuestner   File: motion_vgg_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize  # scalar defaults: Adam() and fit() below expect scalars, not lists
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training 2D CNN')
    print('with lr = ' + str(learningRate) + ', batchSize = ' + str(batchSize))

    # save names
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)
    model_name = sPath + '/' + sFilename + '/' + sFilename + '_lr_' + str(learningRate) + '_bs_' + str(batchSize)
    weight_name = model_name + '_weights.h5'
    model_json = model_name + '_json'
    model_all = model_name + '_model.h5'
    model_mat = model_name + '.mat'

    if (os.path.isfile(model_mat)):  # no training if output file exists
        return

    # create model
    cnn = createModel(patchSize)

    cnn.summary()

    # opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True);#Adag(lr=0.01, epsilon=1e-06)
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
    callbacks.append(ModelCheckpoint(model_all, save_weights_only=False, monitor='val_acc', verbose=1, period=2, save_best_only=True))  # overwrites the last checkpoint; kept just as a safety net
    callbacks.append(ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, min_lr=1e-4, verbose=1))

    cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])

    cnn.fit(X_train,
            y_train,
            validation_data=[X_test, y_test],
            epochs=iEpochs,
            batch_size=batchSize,
            callbacks=callbacks,
            verbose=1)

    # save model
    # cnn.save_weights(weight_name, overwrite=True)
    cnn.save(model_all)  # keras > v0.7 
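The callback stack in fTrainInner combines early stopping on validation loss, periodic checkpointing of the best model, and learning-rate reduction on plateau. A hypothetical call with placeholder arrays (real inputs must match what createModel expects for the given patchSize):

import numpy as np

X_train = np.random.rand(800, 1, 40, 40)
y_train = np.eye(2)[np.random.randint(0, 2, 800)]
X_val = np.random.rand(200, 1, 40, 40)
y_val = np.eye(2)[np.random.randint(0, 2, 200)]

fTrainInner(X_train, y_train, X_val, y_val,
            sOutPath='/data/models/motion_vgg',   # placeholder output path
            patchSize=[40, 40],
            batchSize=64, learningRate=0.0001, iEpochs=100)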
Example 73
Project: CNNArt   Author: thomaskuestner   File: motion_abd_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
       
    weight_name = sOutPath + model_name + '_weights.h5'
    model_json = sOutPath + model_name + '_json'
    model_all = sOutPath + model_name + '_model.h5'

#    # load weights and model (OLD WAY)
#    content = sio.loadmat(model_name)
#    weig = content['wei']
#    nSize = weig.shape
#    weigh = []
#    
#    for i in drange(0,nSize[1],2):
#    	w0 = weig[0,i]
#    	w1 = weig[0,i+1]
#    	w1=w1.T
#    	w1 = np.concatenate(w1,axis=0)
#    	
#    	weigh.extend([w0, w1])
#    	
#    model = model_from_json(model_json)
#    model.set_weights(weigh)
    
    # load weights and model (new way)
    #model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss',patience=10,verbose=1)]
        
    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])  # metrics needed so evaluate() also returns accuracy
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    #model = load_model(model_all)
	
    # assume artifact affected shall be tested!
    #y_test = np.ones((len(X_test),1))
    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T
    
    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)  # show_accuracy was removed in Keras >= 1.0

    prob_pre = model.predict(X_test, batch_size=batchSize, verbose=0)
    
    modelSave = sOutPath + model_name + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre':prob_pre, 'score_test': score_test, 'acc_test':acc_test})

    
###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
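This variant builds its two-column label matrix by stacking y_test and its complement by hand. For 0/1 labels that is equivalent, up to column order, to Keras' own utility:

import numpy as np
from keras.utils import to_categorical

y = np.array([0, 1, 1, 0])
manual = np.asarray([y, np.abs(np.asarray(y, dtype=np.float32) - 1)]).T
builtin = to_categorical(y, num_classes=2)
# the column order is swapped between the two encodings:
# manual[:, 0] equals builtin[:, 1] and vice versa
print(manual)   # [[0. 1.] [1. 0.] [1. 0.] [0. 1.]]
print(builtin)  # [[1. 0.] [0. 1.] [0. 1.] [1. 0.]]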
Example 74
Project: CNNArt   Author: thomaskuestner   File: motion_head_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
       
    weight_name = sOutPath + model_name + '_weights.h5'
    model_json = sOutPath + model_name + '_json'
    model_all = sOutPath + model_name + '_model.h5'

#    # load weights and model (OLD WAY)
#    content = sio.loadmat(model_name)
#    weig = content['wei']
#    nSize = weig.shape
#    weigh = []
#    
#    for i in drange(0,nSize[1],2):
#    	w0 = weig[0,i]
#    	w1 = weig[0,i+1]
#    	w1=w1.T
#    	w1 = np.concatenate(w1,axis=0)
#    	
#    	weigh.extend([w0, w1])
#    	
#    model = model_from_json(model_json)
#    model.set_weights(weigh)
    
    # load weights and model (new way)
    #model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss',patience=10,verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])  # metrics needed so evaluate() also returns accuracy
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    #model = load_model(model_all)
	
    # assume artifact affected shall be tested!
    #y_test = np.ones((len(X_test),1))
    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T
    
    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)  # show_accuracy was removed in Keras >= 1.0
    prob_pre = model.predict(X_test, batch_size=batchSize, verbose=0)
    
    #modelSave = model_name[:-5] + '_pred.mat'
    modelSave = sOutPath + model_name + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre':prob_pre, 'score_test': score_test, 'acc_test':acc_test})

    
###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 75
Project: CNNArt   Author: thomaskuestner   File: VResFCN_3D_Upsampling_final_Motion_Binary_DLArt.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    #sModelPath = sModelPath.replace("_json.txt", "")
    #weight_name = sModelPath + '_weights.h5'
    #model_json = sModelPath + '_json.txt'
    #model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
Example 76
Project: CNNArt   Author: thomaskuestner   File: VResFCN_3D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y=None, Y_segMasks_test=None, sModelPath=None, sOutPath=None, batch_size=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""

    X_test = np.expand_dims(X_test, axis=-1)
    Y_segMasks_test_foreground = np.expand_dims(Y_segMasks_test, axis=-1)
    Y_segMasks_test_background = np.ones(Y_segMasks_test_foreground.shape) - Y_segMasks_test_foreground
    Y_segMasks_test = np.concatenate((Y_segMasks_test_background, Y_segMasks_test_foreground), axis=-1)

    _, sPath = os.path.splitdrive(sModelPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)

    listdir = os.listdir(sModelPath)

    #sModelPath = sModelPath.replace("_json.txt", "")
    #weight_name = sModelPath + '_weights.h5'
    #model_json = sModelPath + '_json.txt'
    #model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    with open(sModelPath + os.sep + sFilename + '.json', 'r') as fp:
        model_string = fp.read()

    model = model_from_json(model_string)

    model.summary()

    model.compile(loss=dice_coef_loss, optimizer=keras.optimizers.Adam(), metrics=[dice_coef])
    model.load_weights(sModelPath + os.sep + sFilename + '_weights.h5')

    score_test, acc_test = model.evaluate(X_test, Y_segMasks_test, batch_size=2)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))

    prob_pre = model.predict(X_test, batch_size=batch_size, verbose=1)

    predictions = {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}

    return predictions 
Example 77
Project: BiDiSentiment   Author: vmarkovtsev   File: train_model.py    MIT License 4 votes vote down vote up
def create_char_rnn_model(args: argparse.Namespace):
    # this late import prevents loading TensorFlow too soon
    import tensorflow as tf
    tf.set_random_seed(args.seed)
    from keras import layers, models, initializers, optimizers, metrics
    log = logging.getLogger("model")
    if args.devices:
        dev1, dev2 = ("/gpu:" + dev for dev in args.devices.split(","))
    else:
        dev1 = dev2 = "/cpu:0"

    def add_rnn(device):
        with tf.device(device):
            input = layers.Input(batch_shape=(args.batch_size, args.length), dtype="uint8")
            log.info("Added %s", input)
            embedding = layers.Embedding(
                256, 256, embeddings_initializer=initializers.Identity(), trainable=False)(input)
            log.info("Added %s", embedding)
        layer = embedding
        layer_sizes = [int(n) for n in args.layers.split(",")]
        for i, nn in enumerate(layer_sizes):
            with tf.device(device):
                layer_type = getattr(layers, args.type)
                ret_seqs = (i < len(layer_sizes) - 1)
                try:
                    layer = layer_type(nn, return_sequences=ret_seqs, implementation=2)(layer)
                except TypeError:
                    # implementation kwarg is not present in CuDNN layers
                    layer = layer_type(nn, return_sequences=ret_seqs)(layer)
                log.info("Added %s", layer)
            if args.dropout > 0:
                layer = layers.Dropout(args.dropout)(layer)
                log.info("Added %s", layer)
        return input, layer

    forward_input, forward_output = add_rnn(dev1)
    reverse_input, reverse_output = add_rnn(dev2)
    with tf.device(dev1):
        merged = layers.Concatenate()([forward_output, reverse_output])
        log.info("Added %s", merged)
        dense = layers.Dense(2, activation="softmax")
        decision = dense(merged)
        log.info("Added %s", decision)
    optimizer = getattr(optimizers, args.optimizer)(lr=args.lr, decay=args.decay)
    log.info("Added %s", optimizer)
    model = models.Model(inputs=[forward_input, reverse_input], outputs=[decision])
    log.info("Compiling...")
    model.compile(optimizer=optimizer, loss="binary_crossentropy",
                  metrics=[metrics.binary_accuracy])
    log.info("Done")
    return model 
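Because create_char_rnn_model takes an argparse.Namespace, it can be exercised without a command line by building the namespace by hand. The field values below are illustrative guesses, not the project's defaults:

import argparse

args = argparse.Namespace(
    seed=42,
    devices="",            # empty string falls back to /cpu:0 for both halves
    batch_size=32,
    length=180,            # characters per sample
    layers="128,128",      # two stacked recurrent layers
    type="LSTM",           # resolved via getattr(keras.layers, args.type)
    dropout=0.2,
    optimizer="RMSprop",   # resolved via getattr(keras.optimizers, ...)
    lr=0.001,
    decay=0.0,
)
model = create_char_rnn_model(args)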
Example 78
Project: deeprace   Author: psteinb   File: resnet_details.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def infer(data, num_inferences, optsdict):
    """ perform <num_inferences> on the given data """

    import keras
    from keras.models import model_from_json
    from keras.optimizers import Adam
    ####################################################################################################################
    # LOADING THE MODEL
    ##
    no_ext = os.path.splitext(optsdict["weights_file"])[0]
    model_json = no_ext + '.json'

    if not os.path.exists(model_json):
        logging.error("%s does not exist, unable to load a model and thus perform inference!", model_json)
        return None, None, None

    model_weights = no_ext + '.h5'
    if not os.path.exists(model_weights):
        logging.error("%s does not exist, unable to load the model weights and thus perform inference!", model_json)
        return None, None, None

    # Watch out for
    with open(model_json, "r") as f:
        json_str = f.read()

    model = keras.models.model_from_json(json_str)

    # Weights
    model.load_weights(model_weights)
    # predict() below does not strictly need compile(); a loss (assumed here to be
    # categorical cross-entropy) is supplied only so compile() itself succeeds
    model.compile(optimizer=Adam(), loss='categorical_crossentropy')

    nsamples_infer = int(num_inferences) if int(num_inferences) <= data[0].shape[0] else data[0].shape[0]
    batch_size = int(optsdict["batch_size"]) if int(optsdict["batch_size"]) < nsamples_infer else 1

    n_iterations = nsamples_infer // batch_size
    timings = [0] * n_iterations
    predictions = [0.] * n_iterations

    for t in range(n_iterations):

        random_id = int(round(random.uniform(0, data[0].shape[0] - batch_size)))
        input_data = data[0][random_id:random_id + batch_size, ...]

        start = datetime.datetime.now()
        result = model.predict(input_data, verbose=True, batch_size=batch_size)  # predict, not evaluate: no targets exist, only inference is timed
        end = datetime.datetime.now()

        timings[t] = ((end - start).total_seconds())
        predictions[t] = result

    return predictions, timings, None 
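infer() reads all of its configuration from the optsdict mapping; note that batch_size arrives as a string and is converted internally. A hypothetical call with placeholder paths and random data:

import numpy as np

data = (np.random.rand(256, 32, 32, 3),)  # hypothetical samples; only data[0] is used
optsdict = {"weights_file": "/models/resnet56.h5",  # /models/resnet56.json must also exist
            "batch_size": "32"}

predictions, timings, _ = infer(data, num_inferences=64, optsdict=optsdict)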
Example 79
Project: Fully-Convolutional-DenseNets   Author: salty-vanilla   File: callbacks.py    MIT License 4 votes vote down vote up
def test():
    '''Trains a simple deep NN on the MNIST dataset.
    Gets to 98.40% test accuracy after 20 epochs
    (there is *a lot* of margin for parameter tuning).
    2 seconds per epoch on a K520 GPU.
    '''

    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    from keras.optimizers import RMSprop

    batch_size = 128
    num_classes = 10
    epochs = 20

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Dense(512, activation='relu', input_shape=(784,)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))

    model.summary()

    callbacks = [Visualizer(x=x_test)]

    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test),
                        callbacks=callbacks)

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1]) 
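The Visualizer callback comes from the surrounding project and is not shown here. As a rough sketch of what such a callback can look like, a minimal keras.callbacks.Callback that runs the model on held-out inputs after every epoch (what a real visualizer would render or save is left out):

from keras.callbacks import Callback

class SimpleVisualizer(Callback):
    def __init__(self, x):
        super(SimpleVisualizer, self).__init__()
        self.x = x

    def on_epoch_end(self, epoch, logs=None):
        # self.model is attached by Keras before training starts
        preds = self.model.predict(self.x, verbose=0)
        print('epoch %d: first prediction %s' % (epoch, preds[0]))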
Example 80
Project: active-learning   Author: google   File: allconv.py    Apache License 2.0 4 votes vote down vote up
def build_model(self, X):
    # assumes that data axis order is same as the backend
    input_shape = X.shape[1:]
    np.random.seed(self.random_state)
    tf.set_random_seed(self.random_state)

    model = Sequential()
    model.add(Conv2D(96, (3, 3), padding='same',
                     input_shape=input_shape, name='conv1'))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), name='conv2', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), strides=(2, 2), padding='same', name='conv3'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Conv2D(192, (3, 3), name='conv4', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), name='conv5', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), strides=(2, 2), name='conv6', padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Conv2D(192, (3, 3), name='conv7', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (1, 1), name='conv8', padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv2D(10, (1, 1),  name='conv9', padding='valid'))

    model.add(GlobalAveragePooling2D())
    model.add(Activation('softmax', name='activation_top'))
    model.summary()

    try:
      optimizer = getattr(keras.optimizers, self.solver)
    except AttributeError:
      raise NotImplementedError('optimizer not implemented in keras')
    # All optimizers with the exception of nadam take decay as named arg
    try:
      opt = optimizer(lr=self.learning_rate, decay=self.lr_decay)
    except TypeError:
      opt = optimizer(lr=self.learning_rate, schedule_decay=self.lr_decay)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    # Save initial weights so that model can be retrained with same
    # initialization
    self.initial_weights = copy.deepcopy(model.get_weights())

    self.model = model
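The getattr lookup above turns the optimizer into a plain string hyperparameter. Stripped of the class context, the same pattern as a small standalone helper (the 'SGD' example name is arbitrary):

import keras

def make_optimizer(name, learning_rate, lr_decay):
    try:
        optimizer_cls = getattr(keras.optimizers, name)
    except AttributeError:
        raise NotImplementedError('optimizer not implemented in keras')
    try:
        # most optimizers accept decay as a keyword argument
        return optimizer_cls(lr=learning_rate, decay=lr_decay)
    except TypeError:
        # Nadam uses schedule_decay instead
        return optimizer_cls(lr=learning_rate, schedule_decay=lr_decay)

opt = make_optimizer('SGD', 0.01, 1e-6)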