Python keras.metrics Examples

The following are code examples showing how to use the keras.metrics module. They are taken from open source Python projects.
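As a quick orientation before the examples: metrics are passed to model.compile, either as strings or as function handles from keras.metrics. A minimal sketch (the toy model and shapes here are placeholders, not taken from any project below):

from keras.models import Sequential
from keras.layers import Dense
from keras import metrics

# Toy binary classifier; metrics may be given as strings or as
# function handles from keras.metrics.
model = Sequential([Dense(1, activation='sigmoid', input_dim=4)])
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy', metrics.binary_accuracy])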

Example 1
Project: WaveNILM   Author: picagrad   File: waveNILM.py    MIT License
def compile_model(model, use_receptive_field_only, loss, all_metrics, noise_mode, use_metric_per_app, app_inds):
	# Fix cost and metrics according to skip_out_of_receptive_field, then compile model
	
	optim = make_optimizer()
	
	# Skip any inputs that may contain zero padding in loss calculation (and performance evaluation)
	if use_receptive_field_only:
		loss = skip_out_of_receptive_field(loss)
		all_metrics = [skip_out_of_receptive_field(m) for m in all_metrics]
	
	# Create a specific copy of each metric for each appliance
	if use_metric_per_app and len(app_inds) > 1:
		ln = len(all_metrics)
		for i in range(len(app_inds)):
			name = '_for_appliance_%d' % app_inds[i]
			for j in range(ln):
				all_metrics.append(make_class_specific_loss(all_metrics[j], i, name))
	
	model.compile(loss=loss, metrics=all_metrics, optimizer=optim)
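make_class_specific_loss is defined elsewhere in WaveNILM; a per-appliance metric wrapper of this kind could look roughly like the following sketch (the slicing convention along the last axis is an assumption):

def make_class_specific_loss(metric, ind, name_suffix):
    # Hypothetical sketch: evaluate the metric on a single output column.
    def class_specific(y_true, y_pred):
        return metric(y_true[..., ind], y_pred[..., ind])
    class_specific.__name__ = metric.__name__ + name_suffix
    return class_specific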
Example 2
Project: urgent-care-comparative   Author: illidanlab   File: deep_models.py    GNU General Public License v3.0
def lstm_model(input_shape, hidden=256, targets=1, learn_rate=1e-4, multiclass=False):
    model = Sequential()
    # A second recurrent layer is stacked only for multi-target, non-multiclass
    # models, so the first LSTM must return sequences in that case.
    stacked = (targets > 1) and not multiclass
    model.add(Bidirectional(LSTM(hidden, return_sequences=stacked),
                            merge_mode='concat', input_shape=input_shape))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))

    if stacked:
        model.add(Bidirectional(LSTM(hidden), merge_mode='concat'))
        model.add(Activation('tanh'))
        model.add(Dropout(0.5))

    model.add(Dense(targets))
    if multiclass:
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['categorical_accuracy'])
    else:
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['accuracy'])
    return model
Example 3
Project: urgent-care-comparative   Author: illidanlab   File: deep_models.py    GNU General Public License v3.0
def cnn_model(input_shape, hidden=256, targets=1, multiclass=False, learn_rate=1e-4):
    model = Sequential()
    model.add(Convolution1D(input_shape=input_shape, nb_filter=64, filter_length=3, border_mode='same', activation='relu'))
    model.add(MaxPooling1D(pool_length=3))
    model.add(Bidirectional(LSTM(hidden), merge_mode='concat'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(targets))
    if multiclass:
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['categorical_accuracy'])
    else:
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['accuracy'])
    return model
Example 4
Project: urgent-care-comparative   Author: illidanlab   File: deep_models.py    GNU General Public License v3.0
def mlp_model(input_shape, hidden=512, targets=1, multiclass=False, learn_rate=1e-4):
    model = Sequential()
    model.add(Dense(hidden, activation='relu', input_shape=input_shape))
    model.add(Dropout(.5))
    model.add(Dense(hidden, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(hidden, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(targets))
    if multiclass:
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['categorical_accuracy'])
    else:
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['accuracy'])
    return model
Example 5
Project: urgent-care-comparative   Author: illidanlab   File: deep_models.py    GNU General Public License v3.0
def hierarchical_cnn(input_shape, aux_shape, targets=1, hidden=256, multiclass=False, learn_rate=1e-4):
    x = Input(shape=input_shape, name='x')
    xx = Convolution1D(nb_filter=64, filter_length=3, border_mode='same', activation='relu')(x)
    xx = MaxPooling1D(pool_length=3)(xx)

    xx = Bidirectional(LSTM(hidden, activation='relu'), merge_mode='concat')(xx)
    xx = Dropout(0.5)(xx)

    dx = Input(shape=aux_shape, name='aux')

    xx = concatenate([xx, dx])
    if multiclass:
        y = Dense(targets, activation='softmax')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learn_rate), metrics=['categorical_accuracy'])
    else:
        y = Dense(targets, activation='sigmoid')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=learn_rate), metrics=['accuracy'])
    return model
Example 6
Project: urgent-care-comparative   Author: illidanlab   File: deep_models.py    GNU General Public License v3.0
def hierarchical_lstm(input_shape, aux_shape, targets=1, hidden=256, multiclass=False, learn_rate=1e-4):
    x = Input(shape=input_shape, name='x')
    xx = Bidirectional(LSTM(hidden, activation='relu'), merge_mode='concat')(x)
    xx = Dropout(0.5)(xx)

    dx = Input(shape=aux_shape, name='aux')

    xx = concatenate([xx, dx])
    xx = Dense(512, activation='relu')(xx)
    if multiclass:
        y = Dense(targets, activation='softmax')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learn_rate), metrics=['categorical_accuracy'])
    else:
        y = Dense(targets, activation='sigmoid')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=learn_rate), metrics=['accuracy'])
    return model
Example 7
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License
def test_net(i):

	model = get_weights(i, username)
	print 'using weights from net trained on dataset {0}'.format(i)
	history = LossAccHistory()

	(X_train, y_train), (X_test, y_test) = get_data(i)

	Y_test = np_utils.to_categorical(y_test, nb_classes)

	X_test /= 255

	print(X_test.shape[0], 'test samples')

	model.compile(loss='binary_crossentropy',
		optimizer=rmsprop(lr=0.001),  # adadelta
		metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])

	score = model.evaluate(X_test, Y_test, verbose=1)

	print(model.metrics_names, score)

	if len(cvscores[0]) == 0:  # if metric names haven't been saved, do so
		cvscores[0].append(model.metrics_names)
	else:
		counter = 1
		for k in score:  # for each test metric, append it to the cvscores list
			cvscores[counter].append(k)
			counter += 1

	model.reset_states()
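The sens and spec entries passed to model.compile above are custom metric functions defined elsewhere in MODS_ConvNet; backend-based sensitivity and specificity metrics might be sketched like this:

from keras import backend as K

def sens(y_true, y_pred):
    # Hypothetical sketch: sensitivity = TP / (TP + FN)
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (possible_pos + K.epsilon())

def spec(y_true, y_pred):
    # Hypothetical sketch: specificity = TN / (TN + FP)
    true_neg = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    possible_neg = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_neg / (possible_neg + K.epsilon())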
Example 8
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License
def save_metrics(cvscores, test_metrics):
	# save test metrics to txt file
	f = open('MODS_test_metrics_labscrossval.txt', 'w')
	for j in cvscores:
		f.write('\n%s\n' % j)
	for i in test_metrics:
		f.write('\n%s\n' % i)
	f.close()

	print test_metrics
Example 9
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def test_net(i):

	model = get_weights(i)
	print 'using weights from net trained on dataset {0}'.format(i)
	history = LossAccHistory()

	(X_train, y_train), (X_test, y_test) = get_data(i)

	Y_test = np_utils.to_categorical(y_test, nb_classes)

	X_test /= 255

	print(X_test.shape[0], 'test samples')

	model.compile(loss='binary_crossentropy',
		optimizer=rmsprop(lr=0.001),  # adadelta
		metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])

	score = model.evaluate(X_test, Y_test, verbose=1)

	print(model.metrics_names, score)

	if len(cvscores[0]) == 0:  # if metric names haven't been saved, do so
		cvscores[0].append(model.metrics_names)
	else:
		counter = 1
		for k in score:  # for each test metric, append it to the cvscores list
			cvscores[counter].append(k)
			counter += 1

	model.reset_states()
Example 10
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def save_metrics(cvscores, test_metrics):
	# save test metrics to txt file
	f = open('MODS_test_metrics.txt', 'w')
	for j in cvscores:
		f.write('\n%s\n' % j)
	for i in test_metrics:
		f.write('\n%s\n' % i)
	f.close()

	print test_metrics
Example 11
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License
def test_net(i, name):

	model = get_weights(i, name)
	print 'using weights from net trained on dataset {0} for {1}'.format(i, name)
	history = LossAccHistory()

	(X_train, y_train), (X_test, y_test) = get_data(i)

	Y_test = np_utils.to_categorical(y_test, nb_classes)

	X_test /= 255

	print(X_test.shape[0], 'test samples')

	model.compile(loss='binary_crossentropy',
		optimizer=rmsprop(lr=0.001),  # adadelta
		metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])

	score = model.evaluate(X_test, Y_test, verbose=1)

	print(model.metrics_names, score)

	if len(cvscores[0]) == 0:  # if metric names haven't been saved, do so
		cvscores[0].append(model.metrics_names)
	else:
		counter = 1
		for k in score:  # for each test metric, append it to the cvscores list
			cvscores[counter].append(k)
			counter += 1

	model.reset_states()
Example 12
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def test_net(i, name):

	model = get_weights(i, name)
	print 'using weights from net trained on dataset {0} for {1}'.format(i, name)
	history = LossAccHistory()

	(X_train, y_train), (X_test, y_test) = get_data(i, name)

	Y_test = np_utils.to_categorical(y_test, nb_classes)

	X_test /= 255

	print(X_test.shape[0], 'test samples')

	model.compile(loss='binary_crossentropy',
		optimizer=rmsprop(lr=0.001),  # adadelta
		metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])

	ypred = model.predict_classes(X_test, verbose=1)
	ytrue = Y_test

	tp, tn, fp, fn = contingency(y_test, ypred)

	print '           |     true label\n---------------------------------'
	print 'pred label |  positive | negative'
	print 'positive   |     ', tp, ' |  ', fp
	print 'negative   |     ', fn, '  |  ', tn

	prec = float(tp) / (tp + fp)
	se = float(tp) / (tp + fn)
	sp = float(tn) / (fp + tn)
	# Matthews correlation coefficient; numerator is tp*tn - fp*fn
	mcc = float(tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
	f1 = (2 * prec * se) / (prec + se)
	acc = float(tp + tn) / (tp + tn + fp + fn)
	print '     sens     |     spec     |     mcc      |      f1      |      prec      |     acc       '
	print se, sp, mcc, f1, prec, acc

	model.reset_states()
	return [se, sp, mcc, f1, prec, acc]
Example 13
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lilfoo.py    MIT License
def test_net(i):

	model = get_weights(i)
	print 'using weights from net trained on dataset {0}'.format(i)
	history = LossAccHistory()

	(X_train, y_train), (X_test, y_test) = get_data(i)

	Y_test = np_utils.to_categorical(y_test, nb_classes)

	X_test /= 255

	print(X_test.shape[0], 'test samples')

	model.compile(loss='binary_crossentropy',
		optimizer=rmsprop(lr=0.001),  # adadelta
		metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])

	score = model.evaluate(X_test, Y_test, verbose=1)

	print(model.metrics_names, score)

	if len(cvscores[0]) == 0:  # if metric names haven't been saved, do so
		cvscores[0].append(model.metrics_names)
	else:
		counter = 1
		for k in score:  # for each test metric, append it to the cvscores list
			cvscores[counter].append(k)
			counter += 1

	model.reset_states()
Example 14
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lilfoo.py    MIT License
def save_metrics(cvscores, test_metrics):
	# save test metrics to txt file
	f = open('MODS_lilfoo_test_metrics.txt', 'w')
	for j in cvscores:
		f.write('\n%s\n' % j)
	for i in test_metrics:
		f.write('\n%s\n' % i)
	f.close()

	print test_metrics
Example 15
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License
def test_net(i, name):

	model = get_weights(i, name)
	print 'using weights from net trained on dataset {0} for {1}'.format(i, name)
	history = LossAccHistory()

	(X_train, y_train), (X_test, y_test) = get_data(i, name)

	Y_test = np_utils.to_categorical(y_test, nb_classes)

	X_test /= 255

	print(X_test.shape[0], 'test samples')

	model.compile(loss='binary_crossentropy',
		optimizer=rmsprop(lr=0.001),  # adadelta
		metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])

	ypred = model.predict_classes(X_test, verbose=1)
	ytrue = Y_test

	tp, tn, fp, fn = contingency(y_test, ypred)

	print '           |     true label\n---------------------------------'
	print 'pred label |  positive | negative'
	print 'positive   |     ', tp, ' |  ', fp
	print 'negative   |     ', fn, '  |  ', tn

	prec = float(tp) / (tp + fp)
	se = float(tp) / (tp + fn)
	sp = float(tn) / (fp + tn)
	# Matthews correlation coefficient; numerator is tp*tn - fp*fn
	mcc = float(tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
	f1 = (2 * prec * se) / (prec + se)
	acc = float(tp + tn) / (tp + tn + fp + fn)
	print '     sens     |     spec     |     mcc      |      f1      |      prec      |     acc       '
	print se, sp, mcc, f1, prec, acc

	model.reset_states()
	return [se, sp, mcc, f1, prec, acc]
Example 16
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License
def save_metrics(cvscores, test_metrics):
	# save test metrics to txt file
	f = open('MODS_test_metrics_labscrossval.txt', 'w')
	for j in cvscores:
		f.write('\n%s\n' % j)
	for i in test_metrics:
		f.write('\n%s\n' % i)
	f.close()

	print test_metrics
Example 17
Project: sisy   Author: qorrect   File: regressor_demo.py    Apache License 2.0
def base_model():
    model = Sequential()
    model.add(Dense(14, input_dim=13, init='normal', activation='relu'))
    model.add(Dense(7, init='normal', activation='relu'))
    model.add(Dense(1, init='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
    return model
Example 18
Project: WhatsSee   Author: luca-ant   File: whats_see.py    GNU General Public License v3.0
def restore_nn(self):
        self.vocabulary, self.word_index_dict, self.index_word_dict, self.max_cap_len = load_vocabulary(self.vocabulary_dir)

        print("VOCABULARY SIZE: " + str(len(self.vocabulary)))
        print("MAX CAPTION LENGTH: " + str(self.max_cap_len))

        self.model = create_NN(len(self.vocabulary), self.max_cap_len)

        self.model.load_weights(self.weights_file)

        #        self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        self.model.summary() 
Example 19
Project: WaveNILM   Author: picagrad   File: waveNILM.py    MIT License
def base_config():
	# Data parameters:
	data_len = 1440*720 # Length of data to be used for training and testing
	val_spl = 0.1  # Split ratio for validation; note that currently validation is used for testing as well
	input_path = '../data' # Location of data files
	data_source = 'AMPds2_In_P_Out_P.dat' # Name of default data source file
	agg_ind = [1] # Location of aggregate measurement in source file
	noise_mode = 0 # Noise mode: 0 - denoised, 1 - noisy, 2 - noisy with modeled noise
	noise_scale = 10 # Weight of noise for noise mode 2
	
	app_inds = [6,8,9,12,14] # Which appliances to disaggregate
	# app_inds = [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
	
	#Network Parameters
	nb_filters = [512,256,256,128,128,256,256,256,512] # Number of nodes per layer, use list for varying layer width.
	depth = 9 # Number of dilated convolutions per stack
	stacks = 1 # Number of dilated convolution stacks
	residual = False # Whether network is residual or not (requires uniform number of nodes per layer, for now)
	use_bias = True # Whether or not network uses bias
	activation = ReLU() # activation - use function handle
	callbacks = [LearningRateScheduler(scheduler, verbose = 1)]	 # Callbacks for training
	dropout = 0.1 # Dropout value
	mask = True # Whether to use masked output
	
	# Training Parameters:
	n_epochs = 300 # Number of training epochs
	batch_size = 50 # Number of samples per batch, each sample will have sample_size timesteps
	sample_size = 1440 # Number of timesteps (minutes for AMPds2) per sample
	savepath = '../data/comparison' # Folder to save models during/after training.
	save_flag = False  # Flag to save best model at each iteration of cross validation
	shuffle  = True # Shuffle samples every epoch
	verbose = 2 # Printing verbosity; because of sacred, use only 0 or 2
	res_l2 = 0. # l2 penalty weight
	use_receptive_field_only = True # Whether or not to ignore the samples without full input receptive field
	loss = objectives.mae # Loss, use function handle
	all_metrics = [estimated_accuracy] #Metrics, use function handle
	optimizer = {} # Placeholder for optimizer
	cross_validate = True # Cross validation parameters
	splice = [0,1,2,3,4,5,6,7,8,9]
	use_metric_per_app = False # Whether to display metrics for each appliance separately
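base_config above is a sacred configuration (note the comment about sacred next to the verbose setting); in a sacred experiment, such a function is registered roughly like this sketch (the experiment name is a placeholder):

from sacred import Experiment

ex = Experiment('waveNILM')  # hypothetical experiment name

@ex.config
def base_config():
    # Every local variable becomes an injectable config entry.
    n_epochs = 300
    batch_size = 50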
Example 20
Project: dts   Author: albertogaspar   File: Seq2Seq.py    MIT License
def evaluate(self, data, fn_inverse=None, horizon=1, fn_plot=None):
        """
        Evaluate model
        :return:
        """
        encoder_input_data, decoder_input_exog, y = data

        y_hat = self.predict(encoder_inputs=encoder_input_data,
                             pred_steps=horizon,
                             decoder_input_exog=decoder_input_exog)

        if fn_inverse is not None:
            y = fn_inverse(y)
            y_hat = fn_inverse(y_hat)

        y = np.float32(y)
        y_hat = np.float32(y_hat)

        if fn_plot is not None:
            fn_plot([y,y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results 
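The get(m) helper used in the loop above resolves a string metric name to its function; in Keras this is keras.metrics.get. For instance:

from keras.metrics import get
from keras import backend as K
import numpy as np

mae = get('mae')  # resolves to keras.metrics.mean_absolute_error
value = K.eval(K.mean(mae(K.variable(np.zeros((2, 3))),
                          K.variable(np.ones((2, 3))))))  # 1.0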
Example 21
Project: dts   Author: albertogaspar   File: Seq2Seq.py    MIT License
def evaluate(self, data, fn_inverse=None, fn_plot=None):
        try:
            encoder_inputs, decoder_inputs, decoder_inputs_exog, y = data
            y_hat = self.model.predict([encoder_inputs, decoder_inputs, decoder_inputs_exog])
        except ValueError:  # data carries no exogenous decoder inputs
            encoder_inputs, decoder_inputs, y = data
            y_hat = self.model.predict([encoder_inputs, decoder_inputs])


        if fn_inverse is not None:
            y = fn_inverse(y)
            y_hat = fn_inverse(y_hat)

        y = np.float32(y)
        y_hat = np.float32(y_hat)

        if fn_plot is not None:
            fn_plot([y, y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results 
Example 22
Project: OverwatchML   Author: sshh12   File: OverwatchPredictHeroSR.py    MIT License
def get_model(hero):
    
    model = Sequential()
    
    model.add(Dense(160, input_dim=len(specific_stats[hero]), kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    
    model.add(Dense(160, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    
    model.add(Dense(160, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    
    model.add(Dense(160, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    
    model.add(Dense(160, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    
    model.add(Dense(100, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    
    model.add(Dense(100, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    
    model.add(Dense(100, kernel_initializer='normal', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))

    model.add(Dense(1))
    
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=[acc_metric])
    
    return model
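acc_metric above is a custom metric defined elsewhere in OverwatchML; since the model regresses a skill rating, a tolerance-based accuracy might be sketched like this (the 50 SR threshold is an assumption):

from keras import backend as K

def acc_metric(y_true, y_pred):
    # Hypothetical sketch: fraction of predictions within 50 SR of the truth.
    return K.mean(K.cast(K.abs(y_true - y_pred) < 50, 'float32'))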


Example 23
Project: WhatsSee   Author: luca-ant   File: whats_see.py    GNU General Public License v3.0
def start_train(self):
        opt = Adam(lr=0.001)
        self.model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
        # self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        if self.last_epoch >= self.total_epochs:
            print("LAST EPOCH TOO MUCH BIG")
            return

        if not os.path.isdir(self.weights_dir):
            os.makedirs(self.weights_dir)

        # callbacks
        save_weights_callback = ModelCheckpoint(self.weights_file, monitor='val_acc', save_weights_only=True, verbose=1, mode='auto', period=1)
        save_epoch_callback = EpochSaver(self.last_epoch + 1, self.last_epoch_file)
        save_model_callback = ModelCheckpoint(self.model_file, verbose=1, period=1)

        # params
        steps_train = (len(self.train_captions) // self.batch_size) + 1
        steps_val = (len(self.val_captions) // self.batch_size) + 1
        self.model.summary()

        # prepare train and val data generator
        train_data_generator = data_generator(self.dataset, self.train_captions, self.train_images_as_vector, self.word_index_dict, self.max_cap_len,
                                              len(self.vocabulary), self.batch_size)
        val_data_generator = data_generator(self.dataset, self.val_captions, self.val_images_as_vector, self.word_index_dict, self.max_cap_len, len(self.vocabulary),
                                            self.batch_size)
        print("TRAINING MODEL")
        history = self.model.fit_generator(train_data_generator, epochs=self.total_epochs, steps_per_epoch=steps_train, verbose=2, validation_data=val_data_generator,
                                           validation_steps=steps_val, callbacks=[save_weights_callback, save_model_callback, save_epoch_callback],
                                           initial_epoch=self.last_epoch)

        # for i in range(self.last_epoch, self.total_epochs):
        #     print("EPOCH: " + str(i + 1) + "/" + str(self.total_epochs))
        #     history = self.model.fit_generator(train_data_generator, epochs=1, steps_per_epoch=steps_train, verbose=2, validation_data=val_data_generator,
        #                                        validation_steps=steps_val, use_multiprocessing=True,
        #                                        callbacks=[save_weights_callback, save_model_callback, save_epoch_callback])

        print("SAVING WEIGHTS TO " + self.weights_file)

        self.model.save_weights(self.weights_file, True)
        print("TRAINING COMPLETE!")

        if os.path.isdir(self.train_dir):
            shutil.rmtree(self.train_dir, ignore_errors=True)

        loss = history.history['loss'][-1]
        val_loss = history.history['val_loss'][-1]
        acc = history.history['acc'][-1]
        val_acc = history.history['val_acc'][-1]
        print(
            "LOSS: {:5.2f}".format(loss) + " - ACC: {:5.2f}%".format(100 * acc) + " - VAL_LOSS: {:5.2f}".format(val_loss) + " - VAL_ACC: {:5.2f}%".format(100 * val_acc))
        return history 
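EpochSaver above is a project-specific callback; a minimal sketch of a callback that persists the number of the epoch to resume from could look like this (the plain-text file format is an assumption):

from keras.callbacks import Callback

class EpochSaver(Callback):
    # Hypothetical sketch: write the index of the next epoch to a file
    # after each epoch, so training can resume where it stopped.
    def __init__(self, start_epoch, epoch_file):
        super(EpochSaver, self).__init__()
        self.epoch = start_epoch
        self.epoch_file = epoch_file

    def on_epoch_end(self, epoch, logs=None):
        with open(self.epoch_file, 'w') as f:
            f.write(str(self.epoch))
        self.epoch += 1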
Example 24
Project: hackaway   Author: siriusdemon   File: char.py    BSD 2-Clause "Simplified" License
def train():
    # model
    X_shape = (FLAGS.image_height, FLAGS.image_width, 3)
    model = Sequential()

    # conv => LeakyReLU => POOL
    model.add(Conv2D(128, kernel_size=3, padding='same', strides=1, input_shape=X_shape))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Conv2D(256, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Conv2D(512, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Conv2D(1024, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # flatten => softmax classifier
    model.add(Flatten())
    model.add(Dense(FLAGS.charset_size))
    model.add(Activation('softmax'))

    optimizer = Adam()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    for epoch in range(FLAGS.num_epochs):
        for i, (X_train, y_train) in enumerate(train_data_iterator()):
            X_train, y_train = np.array(X_train), np.array(y_train)
            loss = model.train_on_batch(X_train, y_train)
            print(f"epoch: {epoch} step: {i} loss: {loss}")

            if i % 100 == 0:
                X_test, y_test = test_data_helper()
                X_test, y_test = np.array(X_test), np.array(y_test)
                score = model.test_on_batch(X_test, y_test)
                print(f"score: {score}")

if __name__ == '__main__':
    train()
Example 25
Project: deeputil   Author: Avkash   File: ImportExport.py    Apache License 2.0
def import_keras_model_config_and_weight_and_compile(model_config, model_weights,
                                                     model_loss_weights="none",
                                                     sample_weight_mode="none",
                                                     model_loss="categorical_crossentropy",
                                                     model_optimizer="rmsprop",
                                                     model_metrics=["acc"],
                                                     show_info=True
                                                     ):
    """
    This function loads a model config and weights from disk and then compile it from given parameters
    model_config:
    model_weights:
    model_weights_mode:
    loss:
    optimizer:
    metrics:
    :return: model (Keras Model)
    """
    model_local = Model  # placeholder returned if validation fails below

    #assert model_config
    #assert model_weights
    #assert sample_weight_mode
    #assert model_loss_weights

    # Check if given loss is part of keras.losses
    utils.helper_functions.show_print_message("Losses: " + model_loss, show_info)
    if model_loss not in definitions.Definitions.keras_losses:
        utils.helper_functions.show_print_message("Error: The given loss function is not a keras loss function.", show_info)
        return model_local

    # Check if given optimizer is part of keras.optimizer
    utils.helper_functions.show_print_message("Optimizers: " + model_optimizer, show_info)
    if model_optimizer not in definitions.Definitions.keras_optimizers:
        utils.helper_functions.show_print_message("Error: The given optimizer is not a keras optimizer.", show_info)
        return model_local

    # Check if the given metrics are part of keras.metrics
    utils.helper_functions.show_print_message("Metrics: " + str(model_metrics), show_info)

    for i in range(len(model_metrics)):
        if model_metrics[i] not in definitions.Definitions.keras_metrics:
            utils.helper_functions.show_print_message("Error: The given metric is not a keras metric.", show_info)
            return model_local

    model_local = import_keras_model_json_from_disk(model_config, show_info)
    model_local = import_keras_model_weights_from_disk(model_local, model_weights, show_info)
    model_local.compile(loss=model_loss,
                        optimizer=model_optimizer,
                        metrics=model_metrics)
    utils.helper_functions.show_print_message("Model config and weight import is done along with compile!", show_info)
    return model_local
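A call to this helper might look like the following sketch (the file paths are placeholders):

# Hypothetical usage; paths are placeholders.
model = import_keras_model_config_and_weight_and_compile(
    model_config="model.json",
    model_weights="model_weights.h5",
    model_loss="categorical_crossentropy",
    model_optimizer="rmsprop",
    model_metrics=["acc"])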