Python keras.models Examples

The following are code examples showing how to use the keras.models module. They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: keras-crf-ner   Author: Super-Louis   File: model.py    MIT License 7 votes vote down vote up
def train():
    """Build, train, and evaluate the BiLSTM-CRF sequence tagger.

    Trains on the datasets returned by ``gen_datasets()``, logs to
    TensorBoard, checkpoints every epoch, and saves the final model.
    """
    net = Sequential()
    for layer in (
        Embedding(voc_size, 128, mask_zero=True),
        Bidirectional(LSTM(64, return_sequences=True)),
        Dropout(rate=0.5),
        Dense(tag_size),
    ):
        net.add(layer)
    crf_layer = CRF(tag_size, sparse_target=True)
    net.add(crf_layer)
    net.summary()
    net.compile('adam', loss=crf_layer.loss_function, metrics=[crf_layer.accuracy])
    X_train, X_test, Y_train, Y_test = gen_datasets()
    # Visualization (TensorBoard) and per-epoch checkpointing callbacks.
    tb_cb = TensorBoard(log_dir='./tb_logs/0914', histogram_freq=0, write_graph=True,
                        write_images=False, embeddings_freq=0,
                        embeddings_layer_names=None, embeddings_metadata=None)
    ckpt_cb = ModelCheckpoint('./models/crf.{epoch:02d}-{val_acc:.2f}.hdf5',
                              monitor='val_acc', verbose=0, save_best_only=False,
                              save_weights_only=False, mode='auto', period=1)
    net.fit(X_train, Y_train, batch_size=100, epochs=epoches,
            validation_data=[X_test, Y_test], callbacks=[tb_cb, ckpt_cb])

    # Final held-out evaluation.
    score = net.evaluate(X_test, Y_test, batch_size=100)
    print('Test loss:', score[0])
    print('Test accuracy', score[1])
    net.save('keras_crf')
Example 2
Project: models   Author: kipoi   File: prepare_model_yaml.py    MIT License 6 votes vote down vote up
def make_model_yaml(template_yaml, model_json, output_yaml_path):
    """Fill a model YAML template with the output targets of a Keras model.

    Args:
        template_yaml: path to the YAML template to start from.
        model_json: path to a Keras model architecture JSON file.
        output_yaml_path: where to write the completed YAML.
    """
    with open(template_yaml, 'r') as f:
        # safe_load avoids arbitrary-object construction; the original
        # yaml.load(f) without a Loader is deprecated and unsafe.
        # (If templates rely on custom YAML tags, switch to an explicit Loader.)
        model_yaml = yaml.safe_load(f)

    # Rebuild the model from its JSON architecture to read its output names
    # and shapes. Context manager fixes the leaked file handle.
    with open(model_json, 'r') as json_file:
        loaded_model = keras.models.model_from_json(json_file.read())

    model_yaml["schema"]["targets"] = []
    # NOTE(review): for a single-output model, `output_shape` is one tuple
    # (not a list of tuples), so this zip pairs names with ints — confirm
    # all models passed here are multi-output.
    for oname, oshape in zip(loaded_model.output_names, loaded_model.output_shape):
        model_yaml["schema"]["targets"].append({
            "name": oname,
            "shape": str(oshape),
            "doc": "Methylation probability for %s" % oname,
        })

    with open(output_yaml_path, 'w') as f:
        yaml.dump(model_yaml, f, default_flow_style=False)
Example 3
Project: models   Author: kipoi   File: prepare_model_yaml.py    MIT License 6 votes vote down vote up
def make_secondary_dl_yaml(template_yaml, model_json, output_yaml_path):
    """Fill a dataloader YAML template with the output targets of a Keras model.

    Args:
        template_yaml: path to the YAML template to start from.
        model_json: path to a Keras model architecture JSON file.
        output_yaml_path: where to write the completed YAML.
    """
    with open(template_yaml, 'r') as f:
        # safe_load avoids arbitrary-object construction; the original
        # yaml.load(f) without a Loader is deprecated and unsafe.
        model_yaml = yaml.safe_load(f)

    # Rebuild the model from its JSON architecture to read its output names
    # and shapes. Context manager fixes the leaked file handle.
    with open(model_json, 'r') as json_file:
        loaded_model = keras.models.model_from_json(json_file.read())

    model_yaml["output_schema"]["targets"] = []
    # NOTE(review): assumes a multi-output model so `output_shape` is a list
    # of tuples — confirm, as a single-output model yields one plain tuple.
    for oname, oshape in zip(loaded_model.output_names, loaded_model.output_shape):
        model_yaml["output_schema"]["targets"].append({
            "name": oname,
            "shape": str(oshape),
            "doc": "Methylation probability for %s" % oname,
        })

    with open(output_yaml_path, 'w') as f:
        yaml.dump(model_yaml, f, default_flow_style=False)
Example 4
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: seq2seq_class.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, model_save_path='../models', 
              model_structure_name='seq2seq_model_demo', 
              model_weights_name='seq2seq_model_demo', 
              model_name=None):
        """Initialize the seq2seq wrapper: file names for saving and
        bookkeeping state for training/prediction.

        Args:
            model_save_path: directory where structure/weights are saved.
            model_structure_name: base name for the architecture JSON file.
            model_weights_name: base name for the weights H5 file.
            model_name: accepted but never used in this body — presumably
                consumed by the superclass or dead; TODO confirm.
        """
        super().__init__()

        # `self.model_name_format_str` is not set here — assumes the
        # superclass __init__ defines it; TODO confirm.
        self.model_save_path = model_save_path
        self.model_structure_name=model_structure_name + self.model_name_format_str +'.json'
        self.model_weights_name=model_weights_name + self.model_name_format_str +'.h5'
        print('model_structure_name:', self.model_structure_name)
        print('model_weights_name:', self.model_weights_name)

        self.pred_result = None # Predicted mean value
        self.pred_var_result = None # Predicted variance value
        self.current_mean_val_loss = None
        self.EARLY_STOP=False
        self.val_loss_list=[]
        self.train_loss_list=[]
        # NOTE(review): overwrites the None assigned a few lines above —
        # the list appears to be the value actually used.
        self.pred_var_result = [] 
Example 5
Project: Incognito   Author: Chinmay26   File: helper.py    MIT License 6 votes vote down vote up
def convert(self, tf_graph_file_name, custom_objects=None):
        """Convert a Keras model into a frozen TensorFlow graph file.

        Args:
            tf_graph_file_name (str): name of the converted TF graph.
            custom_objects (dict): custom parts of the model and their
                TensorFlow definitions.
        """
        # Clear state left over from any previous conversion runs.
        K.clear_session()
        loaded = self.load_keras_model(custom_objects)
        output_names = [tensor.op.name for tensor in loaded.outputs]
        frozen = self.freeze_session(K.get_session(), output_names=output_names)
        tf.train.write_graph(frozen, base_dir, tf_graph_file_name, as_text=False)
Example 6
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 6 votes vote down vote up
def compute_backbone_shapes(config, image_shape):
    """Compute the spatial output size of each backbone stage.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    # A callable backbone supplies its own shape computation.
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        stage_shapes.append([int(math.ceil(height / stride)),
                             int(math.ceil(width / stride))])
    return np.array(stage_shapes)


############################################################
#  Resnet Graph
############################################################

# Code adapted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 7
Project: typhon   Author: atmtools   File: qrnn.py    MIT License 6 votes vote down vote up
def predict(self, x):
        r"""
        Predict quantiles of the conditional distribution P(y|x).

        Normalizes the inputs with the stored mean and standard deviation,
        forward propagates them through every ensemble member, and averages
        the member predictions.

        Arguments:

            x(np.array): Array of shape `(n, m)` containing `n` m-dimensional inputs
                         for which to predict the conditional quantiles.

        Returns:

             Array of shape `(n, k)` with the columns corresponding to the k
             quantiles of the network.

        """
        normalized = (x - self.x_mean) / self.x_sigma
        member_predictions = [member.predict(normalized) for member in self.models]
        return np.mean(np.stack(member_predictions), axis=0)
Example 8
Project: AIX360   Author: IBM   File: test_shap.py    Apache License 2.0 6 votes vote down vote up
def test_ShapGradientExplainer(self):
          """Skipped SHAP GradientExplainer test.

          The full VGG16/imagenet50 demo below is kept as commented-out
          reference code — presumably disabled because it downloads large
          weights/data; confirm before re-enabling.
          """
    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
Example 9
Project: gccaps   Author: tqbl   File: main.py    MIT License 6 votes vote down vote up
def _load_model(epoch):
    """Load model based on specified epoch number.

    Args:
        epoch (int): Epoch number of the model to load.

    Returns:
        An instance of a Keras model.

    Raises:
        FileNotFoundError: If no checkpoint matching `epoch` exists under
            ``cfg.model_path`` (the original code raised a bare IndexError).
    """
    import keras.models

    from capsules import CapsuleLayer
    from gated_conv import GatedConv

    pattern = os.path.join(cfg.model_path, '*.%.02d*.hdf5' % epoch)
    matches = glob.glob(pattern)
    if not matches:
        raise FileNotFoundError(
            'No saved model found for epoch %d (pattern: %s)' % (epoch, pattern))
    model_path = matches[0]

    # Custom layers must be registered so deserialization can rebuild them.
    custom_objects = {
        'GatedConv': GatedConv,
        'CapsuleLayer': CapsuleLayer,
    }

    return keras.models.load_model(model_path, custom_objects)
Example 10
Project: PanopticSegmentation   Author: dmechea   File: model.py    MIT License 6 votes vote down vote up
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    if callable(config.BACKBONE):
        # Custom backbones provide their own shape computation.
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    h, w = image_shape[0], image_shape[1]
    return np.array([
        [int(math.ceil(h / s)), int(math.ceil(w / s))]
        for s in config.BACKBONE_STRIDES
    ])


############################################################
#  Resnet Graph
############################################################

# Code adapted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 11
Project: Sacred_Deep_Learning   Author: AAbercrombie0492   File: evaluate_model.py    GNU General Public License v3.0 6 votes vote down vote up
def define_model(weights_path):
    '''
    Define model structure with weights.

    Builds a ResNet50 with a 2-way softmax head on top of its 'fc1000'
    layer, loads the fine-tuned weights from `weights_path`, and returns
    the compiled model.
    '''
    from resnet50 import ResNet50
    from keras.models import Model
    from keras.layers import Dense, GlobalAveragePooling2D

    base = ResNet50()
    features = base.get_layer('fc1000').output
    # Legacy Keras 1.x kwargs (output_dim / input / output) preserved as-is.
    head = Dense(output_dim=2, activation='softmax')(features)

    net = Model(input=base.input, output=head)
    net.load_weights(weights_path)
    net.compile(loss="categorical_crossentropy",
                optimizer='nadam',
                metrics=['accuracy'])
    return net
Example 12
Project: FasterRCNNbyKeras   Author: meijianhan   File: test_net.py    MIT License 6 votes vote down vote up
def read_datasets(args):
    """Load the training imdb/roidb and report the unflipped validation set.

    Args:
        args: parsed CLI arguments; must provide `imdb_name`,
            `imdbval_name` and `tag`.

    Returns:
        (imdb, roidb) for the training set.
    """
    imdb, roidb = read_db(args.imdb_name)

    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, args.tag)
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipping images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    # BUG FIX: `valroidb` was referenced below although the line loading it
    # was commented out, causing a NameError. Load the validation db into a
    # throwaway imdb name so the training imdb is not clobbered.
    _, valroidb = read_db(args.imdbval_name)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip

    return imdb, roidb
Example 13
Project: RLDonkeycar   Author: downingbots   File: RLKeras.py    MIT License 6 votes vote down vote up
def load(self, model, model_path=None):
      """Load a saved actor or critic Keras model.

      Args:
          model: "critic" or "actor" — selects how the model is deserialized.
          model_path: base path of the saved model; defaults to
              cfg.RL_MODEL_PATH. The actual file is `<model_path>_<model>`.

      Returns:
          The loaded Keras model, or None if loading fails.
      """
      global cfg

      # ARD: TODO: try using save_weights and load_weights instead of the whole
      # model. use the model definition from this file.
      try:
        if model_path is None:
          model_path = cfg.RL_MODEL_PATH
        print("LOADING %s_%s" % (model_path, model))
        full_path = os.path.expanduser(model_path + "_" + model)
        if model == "critic":
          Model = keras.models.load_model(full_path)
        else:
          # The actor embeds a custom PPO loss whose placeholder inputs must
          # be recreated before deserialization.
          advantage_in = Input(shape=(1,))
          old_prediction_in = Input(shape=(1,))
          Model = keras.models.load_model(
              full_path,
              custom_objects={'loss': proximal_policy_optimization_loss_continuous(
                  advantage=advantage_in, old_prediction=old_prediction_in)})
        print("loaded")
        return Model
      except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print("No model to load")
        return None
Example 14
Project: MovieTaster-Open   Author: lujiaying   File: keras_item2vec.py    MIT License 6 votes vote down vote up
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
    """Build and compile a CBOW-style item-embedding model.

    Embeds each of the `context_window_size` context items, averages the
    embeddings, and predicts the target item with a softmax over the
    vocabulary.
    """
    net = keras.models.Sequential()
    net.add(Embedding(
        dict_size, emb_size,
        input_length=context_window_size,
        embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
    ))
    # Average the context-item embeddings into one vector.
    net.add(Lambda(lambda t: K.mean(t, axis=1), output_shape=(emb_size,)))
    net.add(Dense(dict_size))
    net.add(Activation('softmax')) # TODO: use nce

    opt = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    net.compile(optimizer=opt, loss='categorical_crossentropy')
    return net
Example 15
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ELMoBiLSTM.py    Apache License 2.0 6 votes vote down vote up
def tagSentences(self, sentences):
        """Tag `sentences` with every trained model.

        Returns a dict mapping each model name to, per sentence, the list
        of predicted label strings (padding tokens are skipped).
        """
        # Pad characters
        if 'characters' in self.params['featureNames']:
            self.padCharacters(sentences)

        labels = {}
        for name, mdl in self.models.items():
            padded = self.predictLabels(mdl, sentences)
            per_sentence = []
            for s_idx, sentence in enumerate(sentences):
                tags = []
                for t_idx, token in enumerate(sentence['tokens']):
                    if token != 0:  # Skip padding tokens
                        tags.append(padded[s_idx][t_idx])
                per_sentence.append(tags)

            idx2Label = self.idx2Labels[name]
            labels[name] = [[idx2Label[t] for t in tags] for tags in per_sentence]

        return labels
Example 16
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ELMoBiLSTM.py    Apache License 2.0 6 votes vote down vote up
def computeF1(self, modelName, sentences):
        """Compute precision/recall/F1 for one model on `sentences`.

        Scores twice — correcting malformed tags to 'O' and to 'B-' — and
        returns whichever correction strategy yields the higher F1.
        """
        model = self.models[modelName]
        idx2Label = self.idx2Labels[modelName]
        labelKey = self.labelKeys[modelName]
        # Encoding scheme (e.g. BIO/IOBES) is encoded after '_' in the key.
        encodingScheme = labelKey[labelKey.index('_') + 1:]

        correctLabels = [sentence[labelKey] for sentence in sentences]
        predLabels = self.predictLabels(model, sentences)

        pre, rec, f1 = BIOF1Validation.compute_f1(
            predLabels, correctLabels, idx2Label, 'O', encodingScheme)
        pre_b, rec_b, f1_b = BIOF1Validation.compute_f1(
            predLabels, correctLabels, idx2Label, 'B', encodingScheme)

        if f1_b > f1:
            logging.debug("Setting wrong tags to B- improves from %.4f to %.4f" % (f1, f1_b))
            pre, rec, f1 = pre_b, rec_b, f1_b

        return pre, rec, f1
Example 17
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ELMoBiLSTM.py    Apache License 2.0 6 votes vote down vote up
def saveModel(self, modelName, epoch, dev_score, test_score):
        """Save the named model to `self.modelSavePath`.

        The save path template may contain [DevScore], [TestScore], [Epoch]
        and [ModelName] placeholders. Mappings, params, model name and label
        key are stored as HDF5 attributes next to the weights so the model
        can be reloaded self-contained.

        Raises:
            ValueError: if `self.modelSavePath` is not configured.
        """
        import json
        import h5py

        # `is None` instead of `== None` (identity check, PEP 8).
        if self.modelSavePath is None:
            raise ValueError('modelSavePath not specified.')

        savePath = (self.modelSavePath
                    .replace("[DevScore]", "%.4f" % dev_score)
                    .replace("[TestScore]", "%.4f" % test_score)
                    .replace("[Epoch]", str(epoch + 1))
                    .replace("[ModelName]", modelName))

        directory = os.path.dirname(savePath)
        if not os.path.exists(directory):
            os.makedirs(directory)

        if os.path.isfile(savePath):
            logging.info("Model "+savePath+" already exists. Model will be overwritten")

        self.models[modelName].save(savePath, True)

        # Stash reload metadata alongside the weights.
        with h5py.File(savePath, 'a') as h5file:
            h5file.attrs['mappings'] = json.dumps(self.mappings)
            h5file.attrs['params'] = json.dumps(self.params)
            h5file.attrs['modelName'] = modelName
            h5file.attrs['labelKey'] = self.datasets[modelName]['label']
Example 18
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
                 representation of their output.
        """
        from keras.models import Model as KerasModel

        # Lazily build (once) a model that exposes every layer's output.
        if self.keras_model is None:
            inp = self.model.get_input_at(0)
            all_outputs = [layer.output for layer in self.model.layers]
            self.keras_model = KerasModel(inp, all_outputs)

        outputs = self.keras_model(x)

        # Keras returns a bare tensor (not a list) for a one-layer model,
        # so normalize to a list before zipping.
        if len(self.model.layers) == 1:
            outputs = [outputs]

        return dict(zip(self.get_layer_names(), outputs))
Example 19
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, id_embd, time_embd, 
        num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser = None,
              lr=0.001, decay=0, loss = "mse",
              layers=[35, 35], model_save_path='../models', 
              model_structure_name='seq2seq_model.json', model_weights_name='seq2seq_model_weights.h5'):
        """Initialize the competition seq2seq model wrapper.

        Args:
            id_embd, time_embd: whether to embed station id / time features.
            regulariser: weight regulariser forwarded to the base class.
            layers: encoder/decoder layer widths. NOTE: mutable default is
                kept for interface compatibility; it is only passed through.
        """
        # BUG FIX: forward the caller's `regulariser` — the original
        # hard-coded `regulariser=None`, silently discarding the argument.
        super().__init__(num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser=regulariser,
              lr=lr, decay=decay, loss=loss,
              layers=layers)

        self.id_embd = id_embd
        self.time_embd = time_embd
        self.val_loss_list=[]
        self.train_loss_list=[]
        self.current_mean_val_loss = None
        self.early_stop_limit = 10 # with the unit of Iteration Display
        self.EARLY_STOP=False
        self.pred_var_result = []

        # z-scores for the supported prediction-interval confidence levels.
        self.pi_dic={0.95:1.96, 0.9:1.645, 0.8:1.28, 0.68:1.}
        self.target_list=['t2m','rh2m','w10m']
        self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
                         'rh2m':[0.0,100.0],
                         'w10m':[0.0, 30.0]}
        self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
        self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
        self.model_save_path = model_save_path
        self.model_structure_name=model_structure_name
        self.model_weights_name=model_weights_name
Example 20
Project: RFMLS-NEU   Author: neu-spiral   File: BaselineModel2D.py    MIT License 5 votes vote down vote up
def getBaselineModel2D(slice_size=64, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True, \
                        fc1=256, fc2=128, batchnorm=False, \
                        loss='categorical_crossentropy'):
    """Build the 2D baseline CNN: three conv stacks (7x2 + 5x2 filters,
    each followed by 2x1 max-pooling) and two dropout-regularized dense
    layers before the softmax classifier."""
    net = models.Sequential()
    # First stack carries the input shape; the next two are identical.
    net.add(Conv2D(channels, (7, 2), activation='relu', padding='same',
                   input_shape=(slice_size, 2, 1)))
    net.add(Conv2D(channels, (5, 2), activation='relu', padding='same'))
    net.add(MaxPooling2D(pool_size=(2, 1)))
    for _ in range(2):
        net.add(Conv2D(channels, (7, 2), activation='relu', padding='same'))
        net.add(Conv2D(channels, (5, 2), activation='relu', padding='same'))
        net.add(MaxPooling2D(pool_size=(2, 1)))

    net.add(Flatten())
    net.add(Dense(fc1, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(fc2, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(classes, activation='softmax'))

    net.summary()
    return net
Example 21
Project: RFMLS-NEU   Author: neu-spiral   File: HomegrownModel.py    MIT License 5 votes vote down vote up
def getHomegrownModel(slice_size=1024, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True, \
                        flt=[50, 50, 256, 80],k1=[1, 7], k2=[2, 7], batchnorm=False, dr=0.5,\
                        loss='categorical_crossentropy'):
    """Original Homegrown model: two padded conv layers followed by two
    dense layers with dropout, optionally batch-normalized."""
    in_shp = [2, slice_size]
    net = models.Sequential()

    def _maybe_bn(tag):
        # Insert a BatchNormalization layer only when requested.
        if batchnorm:
            net.add(keras.layers.BatchNormalization(momentum=0.9, name=tag))

    net.add(Reshape([1] + in_shp, input_shape=in_shp))
    net.add(ZeroPadding2D((0, 2)))
    net.add(Conv2D(flt[0], (k1[0], k1[1]), padding="valid", kernel_initializer="glorot_uniform", name="conv1"))
    _maybe_bn('bn_1')
    net.add(Activation('relu'))
    net.add(ZeroPadding2D((0, 2)))
    net.add(Conv2D(flt[1], (k2[0], k2[1]), padding="valid", kernel_initializer="glorot_uniform", name="conv2"))
    _maybe_bn('bn_2')
    net.add(Activation('relu'))
    net.add(Flatten())
    net.add(Dense(flt[2], kernel_initializer='he_normal', name="dense1"))
    _maybe_bn('bn_3')
    net.add(Activation('relu'))
    net.add(Dropout(dr))
    net.add(Dense(flt[3], kernel_initializer='he_normal', name="dense2"))
    _maybe_bn('bn_4')
    net.add(Activation('relu'))
    net.add(Dropout(dr))
    net.add(Dense(classes, kernel_initializer='he_normal', kernel_regularizer=l2(0.0001), name="dense3"))
    net.add(Activation('softmax'))

    net.summary()
    return net
Example 22
Project: RFMLS-NEU   Author: neu-spiral   File: BaselineModel.py    MIT License 5 votes vote down vote up
def getBaselineModel(slice_size=64, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True, \
                        fc1=256, fc2=128, batchnorm=False, \
                        loss='categorical_crossentropy'):
    """A dummy model to test the functionalities of the Data Generator"""
    net = models.Sequential()
    # First conv stack carries the input shape.
    net.add(Conv1D(channels, 7, activation='relu', padding='same', input_shape=(slice_size, 2)))
    net.add(Conv1D(channels, 5, padding='same'))
    if batchnorm:
        net.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_1'))
    net.add(Activation('relu'))
    net.add(MaxPooling1D())
    # Remaining cnn_stacks - 1 identical stacks.
    for stack in range(1, cnn_stacks):
        net.add(Conv1D(channels, 7, activation='relu', padding='same'))
        net.add(Conv1D(channels, 5, padding='same'))
        if batchnorm:
            net.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_' + str(stack + 1)))
        net.add(Activation('relu'))
        net.add(MaxPooling1D())

    net.add(Flatten())
    # fc_stacks - 1 hidden layers of width fc1, then one of width fc2.
    for _ in range(1, fc_stacks):
        net.add(Dense(fc1, activation='relu'))
        if dropout_flag:
            net.add(Dropout(0.5))
    net.add(Dense(fc2, activation='relu'))
    if dropout_flag:
        net.add(Dropout(0.5))
    net.add(Dense(classes, activation='softmax'))

    return net
Example 23
Project: RFMLS-NEU   Author: neu-spiral   File: evaluate_model.py    MIT License 5 votes vote down vote up
def getModel(slice_size, classes):
        """A dummy model to test the functionalities of the Data Generator"""
        net = models.Sequential()
        # Three conv stacks of (7-tap, 5-tap) filters, each max-pooled.
        net.add(Conv1D(args.num_filters, 7, activation='relu', padding='same', input_shape=(slice_size, 2)))
        net.add(Conv1D(args.num_filters, 5, activation='relu', padding='same'))
        net.add(MaxPooling1D())
        for _ in range(2):
            net.add(Conv1D(args.num_filters, 7, activation='relu', padding='same'))
            net.add(Conv1D(args.num_filters, 5, activation='relu', padding='same'))
            net.add(MaxPooling1D())
        net.add(Flatten())
        net.add(Dense(256, activation='relu'))
        net.add(Dropout(0.5))
        net.add(Dense(128, activation='relu'))
        net.add(Dropout(0.5))
        net.add(Dense(classes, activation='softmax'))

        optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        net.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        net.summary()

        return net

    # Take one random 1k dataset just to test the Data Generator 
Example 24
Project: Github-Classifier   Author: Ichaelus   File: Classifier.py    MIT License 5 votes vote down vote up
def train(self, X, Y, nb_epoch=10, batch_size=1, get_accuracy=True):
        """Fit the wrapped model on (X, Y).

        Keras Sequential models get one-hot-encoded targets and the epoch/
        batch settings; any other model is fitted with the sklearn-style
        `fit(X, Y)` call.
        """
        assert len(X) == len(Y)
        # isinstance (rather than exact type comparison) also accepts
        # Sequential subclasses.
        if isinstance(self.model, keras.models.Sequential):
            self.model.fit(X, one_hot_encoding(Y), nb_epoch=nb_epoch, batch_size=batch_size, verbose=False)
        else:
            self.model.fit(X, Y)
        print("Trained model successfully")
        print("Final accuracy on training-set: {}".format(self.evaluate(X, Y)))
Example 25
Project: Github-Classifier   Author: Ichaelus   File: Classifier.py    MIT License 5 votes vote down vote up
def predict(self, x):
        """Predict the class for a single sample `x`.

        Keras Sequential models return the argmax over class probabilities;
        other models are queried with the sklearn-style `predict([x])`.
        """
        # isinstance (rather than exact type comparison) also accepts
        # Sequential subclasses.
        if isinstance(self.model, keras.models.Sequential):
            return np.argmax(self.model.predict(np.asarray([x]))[0])
        else:
            return self.model.predict([x])[0]
Example 26
Project: diarization_with_neural_approach   Author: yinruiqing   File: resegment.py    MIT License 5 votes vote down vote up
def load_model(self, train_dir, epoch, compile=True):
            """Load the Keras model checkpointed at `epoch`.

            Falls back to `self.models_dir` when `train_dir` is None, and
            attaches the epoch number to the returned model.
            """
            import keras.models
            if train_dir is None:
                train_dir = self.models_dir
            weights_h5 = '{train_dir}/weights/{epoch:04d}.h5'.format(
                train_dir=train_dir, epoch=epoch)
            model = keras.models.load_model(
                weights_h5, custom_objects=CUSTOM_OBJECTS, compile=compile)
            model.epoch = epoch
            return model
Example 27
Project: sleep-convolutions-tf   Author: cliffordlab   File: model.py    MIT License 5 votes vote down vote up
def save_next(self, summary=None):
        """saves checkpoint to next ckpt-index, and writes summaries"""
        ckpt_index = 1 + self.newest_ckpt(self.trial)
        target = self.model_filename(self.trial, ckpt_index, check_exists=False)
        self.save(self.session, target)
        if summary is not None:
            self.writer.add_summary(summary, ckpt_index)


# Patch keras.models.Model.summary 
Example 28
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        weights_url = ('https://github.com/fchollet/deep-learning-models/'
                       'releases/download/v0.2/'
                       'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        # get_file caches the download under ~/.keras/models and verifies it.
        return get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                        weights_url,
                        cache_subdir='models',
                        md5_hash='a268eb855778b3df3c7506639542a6af')
Example 29
Project: typhon   Author: atmtools   File: qrnn.py    MIT License 5 votes vote down vote up
def save(self, path):
        r"""
        Store the QRNN model in a file.

        This stores the model to a file using pickle for all
        attributes that support pickling. The Keras model
        is handled separately, since it can not be pickled.

        .. note:: In addition to the model file with the given filename,
                  additional files suffixed with :code:`_model_i` will be
                  created for each neural network this model consists of.

        Arguments:

            path(str): The path including filename indicating where to
                       store the model.

        """
        filename = os.path.basename(path)
        name = os.path.splitext(filename)[0]
        dirname = os.path.dirname(path)

        # Keras models cannot be pickled, so each ensemble member is saved
        # to its own file and only the file names are pickled with `self`.
        self.model_files = []
        for i, m in enumerate(self.models):
            self.model_files += [name + "_model_" + str(i)]
            m.save(os.path.join(dirname, self.model_files[i]))
        # Context manager closes the handle even if pickling raises
        # (the original opened the file without one and leaked on error).
        with open(path, "wb") as f:
            pickle.dump(self, f)
Example 30
Project: typhon   Author: atmtools   File: qrnn.py    MIT License 5 votes vote down vote up
def load(path):
        r"""
        Load a model from a file.

        This loads a model that has been stored using the `save` method.

        Arguments:

            path(str): The path from which to read the model.

        Return:

            The loaded QRNN object.
        """
        dirname = os.path.dirname(path)

        # Context manager replaces the manually closed handle.
        with open(path, "rb") as f:
            qrnn = pickle.load(f)

        # The Keras ensemble members were stored in separate files
        # (see `save`); reload each one next to the pickle file.
        qrnn.models = []
        for mf in qrnn.model_files:
            mp = os.path.join(dirname, os.path.basename(mf))
            try:
                qrnn.models += [keras.models.load_model(mp, qrnn.custom_objects)]
            except Exception as e:
                # Chain the original error instead of discarding it
                # (the original bare `except:` hid the root cause).
                raise Exception("Error loading the neural network models. " \
                                "Please make sure all files created during the"\
                                " saving are in this folder.") from e
        return qrnn
Example 31
Project: typhon   Author: atmtools   File: qrnn.py    MIT License 5 votes vote down vote up
def __getstate__(self):
        dct = copy.copy(self.__dict__)
        dct.pop("models")
        return dct 
Example 32
Project: kutils   Author: subpic   File: model_helper.py    MIT License 5 votes vote down vote up
def clean_outputs(self):
        """
        Delete training logs or models created by the current helper configuration.
        Identifies the logs by the configuration paths and `self.model_name`.
        Asks for user confirmation before deleting any files.

        NOTE(review): Python 2 code (print statements); `raw_confirm` is a
        project helper that presumably prompts the user — verify its contract.
        """
        log_dir = os.path.join(self.params.logs_root, self.model_name())
        model_path = os.path.join(self.params.models_root, self.model_name()) + '*.h5'
        # Escape '[' so a literal bracket in the model name is not treated as
        # a glob character class when matching the .h5 files below.
        model_path = model_path.replace('[', '[[]')
        model_files = glob.glob(model_path)

        # Logs live in a single directory; remove the whole tree on confirm.
        if os.path.exists(log_dir):
            print 'Found logs:'
            print log_dir
            if raw_confirm('Delete?'):
                print 'Deleting', log_dir
                shutil.rmtree(log_dir)
        else:
            print '(No logs found)'

        # Model checkpoints are individual files; delete them one by one.
        if model_files:
            print 'Found model(s):'
            print model_files
            if raw_confirm('Delete?'):
                for mf in model_files: 
                    print 'Deleting', mf
                    os.unlink(mf)
        else:
            print '(No models found)' 
Example 33
Project: labelImg   Author: keyuncheng   File: model.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
        # Ignore the caller's `training` flag and always run in inference
        # mode, keeping this layer's state frozen during training
        # (presumably a BatchNorm freeze for fine-tuning — verify).
        # NOTE(review): `super(self.__class__, ...)` recurses infinitely if
        # this class is ever subclassed; safe only as a leaf class.
        return super(self.__class__, self).call(inputs, training=False)


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 34
Project: labelImg   Author: keyuncheng   File: model.py    MIT License 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        # Headless ResNet-50 weights (no top classifier), TF dim ordering.
        weights_url = ('https://github.com/fchollet/deep-learning-models/'
                       'releases/download/v0.2/'
                       'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        # get_file caches the download under ~/.keras/models and verifies it.
        return get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                        weights_url,
                        cache_subdir='models',
                        md5_hash='a268eb855778b3df3c7506639542a6af')
Example 35
Project: Mask-R-CNN-sports-action-fine-tuing   Author: adelmassimo   File: model.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
        # Force inference mode regardless of the `training` argument so the
        # layer's internals stay frozen while the rest of the net trains
        # (presumably a BatchNorm freeze — confirm against the class).
        # NOTE(review): `super(self.__class__, ...)` breaks under further
        # subclassing (infinite recursion).
        return super(self.__class__, self).call(inputs, training=False)


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 36
Project: Mask-R-CNN-sports-action-fine-tuing   Author: adelmassimo   File: model.py    MIT License 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        fname = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
        # ResNet-50 weights without the top classifier head.
        origin = ('https://github.com/fchollet/deep-learning-models/'
                  'releases/download/v0.2/'
                  'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        # Download (or reuse the cached copy) and hand back the local path.
        return get_file(fname, origin,
                        cache_subdir='models',
                        md5_hash='a268eb855778b3df3c7506639542a6af')
Example 37
Project: PruningNeuralNetworks   Author: arturjordao   File: main.py    MIT License 5 votes vote down vote up
def insert_fully(cnn_model, input_shape=(32, 32, 3), num_classes=10):
    """Attach a fresh fully-connected softmax head to a conv backbone.

    Wraps `cnn_model` (up to its last layer) as a feature extractor, then
    stacks Flatten -> Dense(512)/relu -> Dropout(0.5) -> Dense(num_classes)
    with a softmax output.  Returns the combined Keras model.
    """
    inputs = Input(input_shape)
    backbone = Model(cnn_model.input, cnn_model.layers[-1].output)

    x = backbone(inputs)
    x = Flatten()(x)
    x = Dense(512)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes)(x)
    x = Activation('softmax')(x)

    return keras.models.Model(inputs, x)
Example 38
Project: ai-platform   Author: produvia   File: main.py    MIT License 5 votes vote down vote up
def load_pretrained_model():
    """Load and return the trained weather model from `model_filepath`."""
    return keras.models.load_model(model_filepath)
# Predict 
Example 39
Project: PanopticSegmentation   Author: dmechea   File: model.py    MIT License 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        # No-top ResNet-50 checkpoint hosted on the keras-models releases.
        url = ('https://github.com/fchollet/deep-learning-models/'
               'releases/download/v0.2/'
               'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                        url,
                        cache_subdir='models',
                        md5_hash='a268eb855778b3df3c7506639542a6af')
        return path
Example 40
Project: models   Author: IntelAI   File: model.py    Apache License 2.0 5 votes vote down vote up
def call(self, inputs, training=None):
        # Always delegate with training=False: this override pins the layer
        # to inference behaviour even inside a training loop (presumably to
        # freeze normalization statistics — verify against the class).
        # NOTE(review): `super(self.__class__, ...)` is unsafe if subclassed.
        return super(self.__class__, self).call(inputs, training=False)


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 41
Project: models   Author: IntelAI   File: model.py    Apache License 2.0 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        weights_name = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
        # Release asset: ResNet-50, TF ordering, classifier head removed.
        weights_origin = ('https://github.com/fchollet/deep-learning-models/'
                          'releases/download/v0.2/'
                          'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        return get_file(weights_name,
                        weights_origin,
                        cache_subdir='models',
                        md5_hash='a268eb855778b3df3c7506639542a6af')
Example 42
Project: RLDonkeycar   Author: downingbots   File: Keras.py    MIT License 5 votes vote down vote up
def load(self, model_path):
        """Restore a persisted Keras model from `model_path` onto this pilot."""
        restored = keras.models.load_model(model_path)
        self.model = restored
Example 43
Project: RLDonkeycar   Author: downingbots   File: Keras.py    MIT License 5 votes vote down vote up
def default_categorical():
    """Build and compile the default Donkeycar driving network.

    Maps a 120x160 RGB camera frame to two outputs:
      * angle_out: 15-way softmax over steering-angle bins
                   (categorical_crossentropy, weight 0.9)
      * throttle_out: single relu unit, positive throttle value
                      (mean_absolute_error, weight 0.001)

    Returns the compiled Keras Model.  (Unused imports `merge`,
    `MaxPooling2D`, `Reshape`, `BatchNormalization` and the duplicate
    `Dense`/`Activation` imports were removed.)
    """
    from keras.layers import Input, Dense
    from keras.layers import Convolution2D
    from keras.layers import Dropout, Flatten
    from keras.models import Model

    img_in = Input(shape=(120, 160, 3), name='img_in')                      # First layer, input layer, Shape comes from camera.py resolution, RGB
    x = img_in
    # Five conv layers: stride-2 stages shrink the image while widening features.
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)       # 24 features, 5x5 kernel, 2x2 stride
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)       # 32 features, 5x5 kernel, 2x2 stride
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)       # 64 features, 5x5 kernel, 2x2 stride
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)       # 64 features, 3x3 kernel, 2x2 stride
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)       # 64 features, 3x3 kernel, 1x1 stride

    # Possibly add MaxPooling (will make it less sensitive to position in image).  Camera angle fixed, so may not to be needed

    x = Flatten(name='flattened')(x)                                        # Flatten to 1D (Fully connected)
    x = Dense(100, activation='relu')(x)                                    # Classify the data into 100 features, make all negatives 0
    x = Dropout(.1)(x)                                                      # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting)
    x = Dense(50, activation='relu')(x)                                     # Classify the data into 50 features, make all negatives 0
    x = Dropout(.1)(x)                                                      # Randomly drop out 10% of the neurons (Prevent overfitting)
    #categorical output of the angle
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)        # 15 steering bins, softmax gives a probability per bin

    #continous output of throttle
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)      # Reduce to 1 number, Positive number only

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy', 
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .001})

    return model
Example 44
Project: RLDonkeycar   Author: downingbots   File: Keras.py    MIT License 5 votes vote down vote up
def default_linear():
    """Build and compile the linear-output Donkeycar driving network.

    Same conv trunk as `default_categorical`, but both heads are single
    linear units trained with mean_squared_error (equal 0.5 loss weights):
      * angle_out: continuous steering angle
      * throttle_out: continuous throttle

    Returns the compiled Keras Model.  (Unused imports `merge`,
    `MaxPooling2D`, `Reshape`, `BatchNormalization`, `Activation` and the
    duplicate `Dense` import were removed.)
    """
    from keras.layers import Input, Dense
    from keras.layers import Convolution2D
    from keras.layers import Dropout, Flatten
    from keras.models import Model

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    # Conv trunk identical to the categorical variant.
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='linear')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='linear')(x)
    x = Dropout(.1)(x)
    # Continuous (linear) output of the steering angle.
    angle_out = Dense(1, activation='linear', name='angle_out')(x)

    # Continuous output of the throttle.
    throttle_out = Dense(1, activation='linear', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])

    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error', 
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})

    return model
Example 45
Project: MovieTaster-Open   Author: lujiaying   File: keras_item2vec.py    MIT License 5 votes vote down vote up
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
    """Create a skip-gram model scoring (target, context) item pairs.

    Two construction styles are supported: the legacy 'Sequential' API and
    the 'Functional' API (default).  Both produce a sigmoid probability from
    the dot product of the two embeddings and compile with adam +
    binary_crossentropy.  Any other `paradigm` prints an error and
    returns None.
    """
    if paradigm == 'Sequential':
        # Legacy style: two embedding towers merged by a dot product.
        target = Sequential()
        target.add(Embedding(vocab_size, embedding_dim, input_length=1))
        context = Sequential()
        context.add(Embedding(vocab_size, embedding_dim, input_length=1))

        model = Sequential()
        model.add(Merge([target, context], mode='dot'))
        model.add(Reshape((1,), input_shape=(1,1)))
        model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    if paradigm == 'Functional':
        target_in = Input(shape=(1,), name='target')
        context_in = Input(shape=(1,), name='context')
        # A single shared embedding ties both inputs to one vector space.
        shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1,
                                     name='shared_embedding')
        similarity = dot([shared_embedding(target_in),
                          shared_embedding(context_in)], axes=-1)
        flat = Reshape((1,), input_shape=(1,1))(similarity)
        prediction = Dense(1, input_shape=(1,), activation='sigmoid')(flat)

        model = Model(inputs=[target_in, context_in], outputs=prediction)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    print('paradigm error')
    return None
Example 46
Project: MovieTaster-Open   Author: lujiaying   File: keras_item2vec.py    MIT License 5 votes vote down vote up
def train_cbow_base_model():
    """Train the CBOW item-embedding model with hand-rolled mini-batches and
    dump the learned embedding matrix to a text file.

    NOTE(review): Python 2 code (`xrange`); the evaluate/train/append order
    inside the loop is deliberate — do not reorder.
    """
    min_word_freq = 5
    word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
    dict_size = len(word_dict)
    emb_size = 100
    context_window_size = 4
    epochs = 20
    batch_size = 128

    model = cbow_base_model(dict_size, emb_size, context_window_size)
    for epoch_id in xrange(epochs):
        # train by batch
        batch_id = 0
        x_batch = []
        y_batch = []
        for movie_ids in process.shuffle(process.reader_creator(word_dict, ngram=context_window_size+1), 10000)():
            batch_id += 1
            # Every 50 batches, log the loss on the currently accumulated
            # (not yet trained) examples.
            if batch_id % (batch_size*50) == 0:
                # Print evaluate log
                score = model.evaluate(np.array(x_batch),
                    keras.utils.to_categorical(y_batch, num_classes=dict_size))
                logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, score))
            # Once a full batch has been accumulated, train on it and reset.
            if batch_id % batch_size == 0:
                # Convert labels to categorical one-hot encoding
                model.train_on_batch(np.array(x_batch),
                        keras.utils.to_categorical(y_batch, num_classes=dict_size))
                x_batch = []
                y_batch = []
            # First `context_window_size` ids are the context; last id is the
            # label to predict.
            x = np.array(movie_ids[:context_window_size])
            y = movie_ids[-1]
            x_batch.append(x)
            y_batch.append(y)
    logger.info('model train done')
    # store word embedding
    # Row i of the first layer's weight matrix is the embedding for item i.
    with open('./models/keras_0804_09_cbow', 'w') as fwrite:
        for idx, vec in enumerate(model.layers[0].get_weights()[0].tolist()):
            fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec]))) 
Example 47
Project: HandyNet   Author: arangesh   File: model.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
        # Override that discards the incoming `training` flag and always runs
        # the parent layer in inference mode (presumably freezing BatchNorm
        # statistics during fine-tuning — confirm against the class).
        # NOTE(review): `super(self.__class__, ...)` recurses if subclassed.
        return super(self.__class__, self).call(inputs, training=False)


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 48
Project: HandyNet   Author: arangesh   File: model.py    MIT License 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        # ResNet-50 weights with the classification head stripped.
        source_url = ('https://github.com/fchollet/deep-learning-models/'
                      'releases/download/v0.2/'
                      'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        local_path = get_file(
            'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
            source_url,
            cache_subdir='models',
            md5_hash='a268eb855778b3df3c7506639542a6af')
        return local_path
Example 49
Project: EasyPR-python   Author: SunskyF   File: model.py    Apache License 2.0 5 votes vote down vote up
def call(self, inputs, training=None):
        # Pin the layer to inference behaviour: the `training` argument is
        # ignored and False is always passed through (presumably to keep
        # normalization layers frozen — verify against the enclosing class).
        # NOTE(review): `super(self.__class__, ...)` is unsafe under
        # further subclassing.
        return super(self.__class__, self).call(inputs, training=False)


############################################################
#  Resnet Graph
############################################################

# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py 
Example 50
Project: EasyPR-python   Author: SunskyF   File: model.py    Apache License 2.0 5 votes vote down vote up
def get_imagenet_weights(self):
        """Downloads ImageNet trained weights from Keras.
        Returns path to weights file.
        """
        from keras.utils.data_utils import get_file
        checkpoint = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
        # GitHub release asset for the headless ResNet-50 checkpoint.
        origin_url = ('https://github.com/fchollet/deep-learning-models/'
                      'releases/download/v0.2/'
                      'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
        return get_file(checkpoint,
                        origin_url,
                        cache_subdir='models',
                        md5_hash='a268eb855778b3df3c7506639542a6af')