Python keras.utils.plot_model() Examples

The following are 14 code examples of keras.utils.plot_model(), drawn from open-source projects; the original project and source file are noted above each example. You may also want to check out the other available functions and classes of the keras.utils module.
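Before the project examples, here is a minimal, self-contained sketch of the typical call. plot_model renders a model's layer graph to an image file and requires the pydot package and the Graphviz binaries to be installed; the toy two-layer model below exists only so there is a graph to draw.

from keras.models import Sequential
from keras.layers import Dense
from keras.utils import plot_model

# A toy model, just so there is a graph to draw.
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(16,)))
model.add(Dense(1, activation='sigmoid'))

# Render the graph to a PNG; show_shapes annotates each layer with its
# input/output shapes. Requires pydot and Graphviz.
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)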
Example #1
Source File: mymodel.py    From AI_for_Wechat_tiaoyitiao with GNU General Public License v3.0
# Imports for this excerpt (assumed; the original file's imports are not shown).
from keras import layers, models

def get_model():
    model = models.Sequential()
    model.add(layers.Conv2D(16, (3, 3), activation='relu', input_shape=(135, 240, 3), padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(27, activation='softmax'))

    return model

#model.summary()
#plot_model(model, to_file='model.png') 
Example #2
Source File: CoupledCFWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def save_model(self, folder_path, file_name=None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Saving model in file '{}'".format(folder_path + file_name))

        data_dict_to_save = {
                              'learning_rate':self.learning_rate,
                              'num_epochs':self.num_epochs,
                              'num_negatives':self.num_negatives,
                              'dataset_name':self.dataset_name,
                              'number_model':self.number_model,
                              'plot_model':self.plot_model,
                              'current_epoch':self.current_epoch,
                              'verbose':self.verbose,
                              }

        dataIO = DataIO(folder_path=folder_path)
        dataIO.save_data(file_name=file_name, data_dict_to_save=data_dict_to_save)

        self.model.save(folder_path + file_name + "_keras_model.h5")

        self._print("Saving complete") 
Example #3
Source File: plot_model_struct.py    From speaker_recognition with Apache License 2.0
# Imports for this excerpt (assumed; the original file's imports are not shown).
# MaxoutDense is a Keras 1.x layer; in early Keras 2 releases it is available
# as keras.legacy.layers.MaxoutDense.
from keras.models import Sequential
from keras.layers import Conv1D, MaxPool1D, BatchNormalization, Flatten, Dropout, Dense
from keras.legacy.layers import MaxoutDense
from keras.regularizers import l2

def construct_model(classe_nums):
    model = Sequential()

    model.add(
        Conv1D(filters=256, kernel_size=3, strides=1, activation='relu', input_shape=(99, 40), name='block1_conv1'))
    model.add(MaxPool1D(pool_size=2, name='block1_pool1'))
    model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, axis=1))

    model.add(Conv1D(filters=256, kernel_size=3, strides=1, activation='relu', name='block1_conv2'))
    model.add(MaxPool1D(pool_size=2, name='block1_pool2'))

    model.add(Flatten(name='block1_flat1'))
    model.add(Dropout(0.5, name='block1_drop1'))

    model.add(Dense(512, activation='relu', name='block2_dense2'))
    model.add(MaxoutDense(512, nb_feature=4, name="block2_maxout2"))
    model.add(Dropout(0.5, name='block2_drop2'))

    model.add(Dense(512, activation='relu', name='block2_dense3', kernel_regularizer=l2(1e-4)))
    model.add(MaxoutDense(512, nb_feature=4, name="block2_maxout3"))
    model.add(Dense(classe_nums, activation='softmax', name="predict"))

    # plot_model(model, to_file='model_struct.png', show_shapes=True, show_layer_names=False)

    model.summary() 
Example #4
Source File: trainer.py    From delft with Apache License 2.0
def train(self, x_train, y_train, x_valid, y_valid, callbacks=None):
        """ 
        Train the instance self.model 
        """
        if 'bert' not in self.model_config.model_type.lower():
            self.model.summary()
            #print("self.model_config.use_crf:", self.model_config.use_crf)

            if self.model_config.use_crf:
                self.model.compile(loss=self.model.crf.loss,
                               optimizer='adam')
            else:
                self.model.compile(loss='categorical_crossentropy',
                               optimizer='adam')
                               #optimizer=Adam(lr=self.training_config.learning_rate))
            # uncomment to plot graph
            #plot_model(self.model, 
            #    to_file='data/models/sequenceLabelling/'+self.model_config.model_name+'_'+self.model_config.model_type+'.png')
            self.model = self.train_model(self.model, x_train, y_train, x_valid, y_valid, 
                                                  self.training_config.max_epoch, callbacks=callbacks)
        else:
            # for BERT architectures, directly call the model trainer
            if self.training_config.early_stop:
                self.model.train(x_train, y_train)
            else:
                self.model.train(np.concatenate([x_train, x_valid]), np.concatenate([y_train, y_valid]))
Example #5
Source File: wrapper.py    From delft with Apache License 2.0
def train(self, x_train, y_train, vocab_init=None, callbacks=None):
        self.model = getModel(self.model_config, self.training_config)

        # bert models
        if self.model_config.model_type.find("bert") != -1:     
            self.model.processor = BERT_classifier_processor(labels=self.model_config.list_classes, x_train=x_train, y_train=y_train)
            self.model.train()
            return

        # create validation set in case we don't use k-folds
        xtr, val_x, y, val_y = train_test_split(x_train, y_train, test_size=0.1)

        training_generator = DataGenerator(xtr, y, batch_size=self.training_config.batch_size, 
            maxlen=self.model_config.maxlen, list_classes=self.model_config.list_classes, 
            embeddings=self.embeddings, shuffle=True)
        validation_generator = DataGenerator(val_x, None, batch_size=self.training_config.batch_size, 
            maxlen=self.model_config.maxlen, list_classes=self.model_config.list_classes, 
            embeddings=self.embeddings, shuffle=False)
        
        # uncomment to plot graph
        #plot_model(self.model, 
        #    to_file='data/models/textClassification/'+self.model_config.model_name+'_'+self.model_config.model_type+'.png')
        self.model, best_roc_auc = train_model(self.model, self.model_config.list_classes, self.training_config.batch_size, 
            self.training_config.max_epoch, self.training_config.use_roc_auc, self.training_config.class_weights, 
            training_generator, validation_generator, val_y, use_ELMo=self.embeddings.use_ELMo, 
            use_BERT=self.embeddings.use_BERT, multiprocessing=self.training_config.multiprocessing, callbacks=callbacks)
        if self.embeddings.use_ELMo:
            self.embeddings.clean_ELMo_cache()
        if self.embeddings.use_BERT:
            self.embeddings.clean_BERT_cache() 
Example #6
Source File: textAnalysis.py    From deep_learning with MIT License
def get_model():
    # # Load the network structure
    # with open('./models/text_lstm.yaml', 'r') as yaml_file:
    #     loaded_model_yaml = yaml_file.read()
    # model = model_from_yaml(loaded_model_yaml)
    # # Load the model weights
    # model.load_weights("./models/text_lstm.h5")
    # print("model Loaded")
    # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    # utils.plot_model(model, to_file='./models/text_lstm_model.png')

    model = load_model("./models/text_lstm_full.h5")

    return model 
Example #7
Source File: recommend_dnn.py    From deep_learning with MIT License
def build_model(x_train, y_train):
    """
    Build the network and train the model.
    """

    print("build network")
    usr_input = Input(shape=(3,))
    usr_x = Embedding(x_train[0].shape[0] + 1, 256, input_length=3)(usr_input)
    print("user_embedding_x:", usr_x.shape)
    usr_x = Flatten()(usr_x)
    usr_x = Dense(128, activation='relu')(usr_x)
    print("user_dense_x:", usr_x.shape)

    mov_input = Input(shape=(3,))
    # NOTE: the original code sizes the movie embedding from x_train[0] as well;
    # if x_train is [user_features, movie_features], x_train[1] was likely intended.
    mov_x = Embedding(x_train[0].shape[0] + 1, 256, input_length=3)(mov_input)
    print("movie_embedding_x:", mov_x.shape)
    mov_x = Flatten()(mov_x)
    mov_x = Dense(128, activation='relu')(mov_x)
    print("movie_dense_x:", mov_x.shape)

    concat_tensor = Concatenate()([usr_x, mov_x])
    print("concat_tensor:", concat_tensor.shape)
    x_tensor = Dense(64, activation='relu')(concat_tensor)
    x_tensor = Dropout(0.5)(x_tensor)
    x_tensor = Dense(32, activation='relu')(x_tensor)
    x_tensor = Dropout(0.3)(x_tensor)
    x_output = Dense(1, activation='linear')(x_tensor)

    print("Model:", usr_input.shape, mov_input.shape, "output_x:", x_output.shape)
    model = Model([usr_input, mov_input], x_output)
    optimizer = Adam(lr=0.002)  # named `sgd` in the original despite being Adam
    model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])
    model_png = './models/dnn_recomm_model.png'
    # Plot the network structure (only once, if the image does not exist yet)
    if not os.path.exists(model_png):
        utils.plot_model(model, to_file=model_png)
    callTB = callbacks.TensorBoard(log_dir='./logs/dnn_merge-1')
    print("training model")
    best_model = callbacks.ModelCheckpoint("./models/dnn_recommend_full.h5", monitor='val_loss', verbose=0, save_best_only=True)
    model.fit(x_train, y_train, epochs=64, batch_size=512, callbacks=[callTB, best_model], validation_split=0.2)
Example #8
Source File: speech_model_02.py    From ASR_WORD with GNU Affero General Public License v3.0
def creat_model(self):
        input_data = Input(shape=[self.AUDIO_LENGTH, self.FEATURE_LENGTH, 1], name='Input')
        # NOTE: the original excerpt builds `layer1` but never applies it, and the
        # `dense2` consumed below is undefined; the two wiring lines that follow are
        # an assumption (drop the channel axis, run the LSTM over time) so that the
        # graph connects end to end.
        layer1 = LSTM(units=256, activation='relu', return_sequences=True, use_bias=True)
        x = Reshape([self.AUDIO_LENGTH, self.FEATURE_LENGTH])(input_data)
        dense2 = layer1(x)

        dense4 = Dense(units=self.MS_OUTPUT_SIZE, use_bias=True, kernel_initializer='he_normal')(dense2)
        y_pred = Activation(activation='softmax', name='activation')(dense4)
        model_data = Model(inputs=input_data, outputs=y_pred)

        # model_data.summary()
        # plot_model(model_data, '/home/zhangwei/01.png')

        labels = Input(shape=[self.label_max_string_length], name='labels', dtype='float32')
        input_length = Input(shape=[1], name='input_length', dtype='int64')
        label_length = Input(shape=[1], name='label_length', dtype='int64')
        loss_out = Lambda(self.ctc_lambda_func, output_shape=[1, ], name='ctc')([y_pred, labels, input_length, label_length])
        model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)

        # model.summary()

        # Three candidate optimizers kept from experimentation; only `rms` is used.
        sgd = SGD(lr=0.0005, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
        adam = Adam(lr=0.01, epsilon=1e-6)
        rms = RMSprop(lr=0.01, rho=0.9, epsilon=1e-6)

        model.compile(optimizer=rms, loss={'ctc': lambda y_true, y_pred: y_pred})

        print('========================== Model created successfully ==========================')
        return model, model_data
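Both speech-model examples route their loss through self.ctc_lambda_func, which is not shown in these excerpts. The standard Keras pattern for this function wraps K.ctc_batch_cost; the sketch below assumes the project follows that pattern rather than reproducing its exact code:

from keras import backend as K

def ctc_lambda_func(self, args):
    # Unpack the four tensors passed to the Lambda layer.
    y_pred, labels, input_length, label_length = args
    # ctc_batch_cost returns the per-sample CTC loss for the batch.
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)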
Example #9
Source File: DeepCFWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def save_model(self, folder_path, file_name=None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Saving model in file '{}'".format(folder_path + file_name))


        data_dict_to_save = {
                              'learning_rate':self.learning_rate,
                              'num_epochs':self.num_epochs,
                              'num_negatives':self.num_negatives,
                              'dataset_name':self.dataset_name,
                              'number_model':self.number_model,
                              'plot_model':self.plot_model,
                              'current_epoch':self.current_epoch,
                              'verbose':self.verbose,
                              }


        dataIO = DataIO(folder_path=folder_path)
        dataIO.save_data(file_name=file_name, data_dict_to_save=data_dict_to_save)

        self.model.save(folder_path + file_name + "_keras_model.h5")

        self._print("Saving complete") 
Example #10
Source File: speech_model_01.py    From ASR_WORD with GNU Affero General Public License v3.0
def creat_model(self):
        input_data = Input(shape=[self.AUDIO_LENGTH, self.FEATURE_LENGTH, 1], name='Input')
        conv1 = Conv2D(filters=32, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True, kernel_initializer='TruncatedNormal')(input_data)
        conv1 = BatchNormalization(epsilon=0.0002)(conv1)
        conv2 = Conv2D(filters=32, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True, kernel_initializer='TruncatedNormal')(conv1)
        conv2 = BatchNormalization(epsilon=0.0002)(conv2)
        maxpool1 = MaxPooling2D(pool_size=[2, 2], strides=None, padding='valid')(conv2)

        conv3 = Conv2D(filters=64, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True, kernel_initializer='TruncatedNormal')(maxpool1)
        conv3 = BatchNormalization(epsilon=0.0002)(conv3)
        conv4 = Conv2D(filters=64, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True, kernel_initializer='TruncatedNormal')(conv3)
        conv4 = BatchNormalization(epsilon=0.0002)(conv4)
        maxpool2 = MaxPooling2D(pool_size=[2, 2], strides=None, padding='valid')(conv4)

        conv5 = Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True, kernel_initializer='TruncatedNormal')(maxpool2)
        conv5 = BatchNormalization(epsilon=0.0002)(conv5)
        conv6 = Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True, kernel_initializer='TruncatedNormal')(conv5)
        conv6 = BatchNormalization(epsilon=0.0002)(conv6)
        maxpool3 = MaxPooling2D(pool_size=[2, 2], strides=None, padding='valid')(conv6)

        # conv7 = Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation='relu', use_bias=True,kernel_initializer='he_normal')(maxpool3)
        # conv7 = BatchNormalization(epsilon=0.0002)(conv7)
        # conv8 = Conv2D(filters=128 , kernel_size=[3, 3], padding='same', activation='relu', use_bias=True , kernel_initializer='he_normal')(conv7)
        # conv8 = BatchNormalization(epsilon=0.0002)(conv8)
        # maxpool4 = MaxPooling2D(pool_size=[2, 2], strides=None, padding='valid')(conv8)

        reshape = Reshape([250, 3200])(maxpool3)
        dense2 = Dense(units=1024, activation='relu', use_bias=True, kernel_initializer='he_normal')(reshape)
        dense2 = BatchNormalization(epsilon=0.0002)(dense2)
        dense2 = Dropout(0.3)(dense2)

        dense3 = Dense(units=1024, activation='relu', use_bias=True, kernel_initializer='he_normal')(dense2)
        dense3 = BatchNormalization(epsilon=0.0002)(dense3)
        dense3 = Dropout(0.3)(dense3)

        dense4 = Dense(units=self.MS_OUTPUT_SIZE, use_bias=True, kernel_initializer='he_normal')(dense3)
        y_pred = Activation(activation='softmax', name='activation')(dense4)
        model_data = Model(inputs=input_data, outputs=y_pred)

        # model_data.summary()
        # plot_model(model_data , '/home/zhangwei/01.png')

        labels = Input(shape=[self.label_max_string_length], name='labels', dtype='float32')
        input_length = Input(shape=[1], name='input_length', dtype='int64')
        label_length = Input(shape=[1], name='label_length', dtype='int64')
        loss_out = Lambda(self.ctc_lambda_func, output_shape=[1, ], name='ctc')([y_pred, labels, input_length, label_length])
        model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)

        # model.summary()

        # Two candidate optimizers kept from experimentation; only `adam` is used.
        sgd = SGD(lr=0.0005, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
        adam = Adam(lr=0.0005, epsilon=1e-6)

        # The CTC loss is computed inside the Lambda layer, so the compiled loss
        # simply passes y_pred (the per-batch CTC loss) through.
        model.compile(optimizer=adam, loss={'ctc': lambda y_true, y_pred: y_pred})

        print('========================== Model created successfully ==========================')
        return model, model_data
Example #11
Source File: DeepCFWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def fit(self, learning_rate=0.001,
            epochs=30,
            n_negative_sample=4,
            dataset_name='Movielens1M',
            number_model=3,
            verbose=1,
            plot_model=True,
            temp_file_folder=None,
            **earlystopping_kwargs
            ):


        self.learning_rate = learning_rate
        self.num_epochs = epochs
        self.num_negatives = n_negative_sample
        self.dataset_name = dataset_name
        self.number_model = number_model
        self.plot_model = plot_model
        self.verbose = verbose
        self.current_epoch = 0

        self.temp_file_folder = self._get_unique_temp_folder(input_temp_file_folder=temp_file_folder)

        print("{}: Init model...".format(self.RECOMMENDER_NAME))

        # load model
        self.model = self._get_model(self.n_users, self.n_items, self.dataset_name, self.number_model)

        # compile model
        self.model.compile(optimizer=Adam(lr=self.learning_rate), loss='binary_crossentropy', metrics=['accuracy', 'mae'])

        if self.plot_model:
            utils.plot_model(self.model, show_shapes=True, to_file='CoupledCF_{}_model{}.png'.format(self.dataset_name, self.number_model))

        if self.verbose > 1:
            self.model.summary()

        print("{}: Init model... done!".format(self.RECOMMENDER_NAME))

        print("{}: Training...".format(self.RECOMMENDER_NAME))

        self._update_best_model()

        self._train_with_early_stopping(self.num_epochs,
                                        algorithm_name = self.RECOMMENDER_NAME,
                                        **earlystopping_kwargs)

        self.load_model(self.temp_file_folder, file_name="_best_model")

        print("{}: Tranining complete".format(self.RECOMMENDER_NAME))
        self._clean_temp_folder(temp_file_folder=self.temp_file_folder) 
Example #12
Source File: CoupledCFWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def fit(self, learning_rate=0.001,
            epochs=30,
            n_negative_sample=4,
            dataset_name='Movielens1M',
            number_model=0,
            verbose=1,
            plot_model=True,
            temp_file_folder=None,
            **earlystopping_kwargs
            ):


        self.learning_rate = learning_rate
        self.num_epochs = epochs
        self.num_negatives = n_negative_sample
        self.dataset_name = dataset_name
        self.number_model = number_model
        self.plot_model = plot_model
        self.verbose = verbose
        self.current_epoch = 0

        self.temp_file_folder = self._get_unique_temp_folder(input_temp_file_folder=temp_file_folder)

        print("{}: Init model for {} ...".format(self.RECOMMENDER_NAME, self.dataset_name))

        # load model
        self.model = self._get_model(self.n_users, self.n_items, self.dataset_name, self.number_model)

        # compile model
        self.model.compile(optimizer=Adam(lr=self.learning_rate),
                           loss='binary_crossentropy',
                           metrics=['accuracy', 'mae'])

        if self.plot_model:
            utils.plot_model(self.model,
                             show_shapes=True,
                             to_file='CoupledCF_{}_model{}.png'.format(self.dataset_name, self.number_model))

        if self.verbose > 1:
            self.model.summary()

        print("{}: Init model... done!".format(self.RECOMMENDER_NAME))

        print("{}: Training...".format(self.RECOMMENDER_NAME))

        self._update_best_model()

        self._train_with_early_stopping(self.num_epochs,
                                        algorithm_name = self.RECOMMENDER_NAME,
                                        **earlystopping_kwargs)

        self.load_model(self.temp_file_folder, file_name="_best_model")

        print("{}: Tranining complete".format(self.RECOMMENDER_NAME))
        self._clean_temp_folder(temp_file_folder=self.temp_file_folder) 
Example #13
Source File: ConGAN.py    From ConGAN with MIT License
def __init__(self):
        # position_input, img_ident_input, latent_input and LATENT_SPACE are
        # module-level globals defined elsewhere in the source file.
        optimizer = Adam(0.0004, 0.5, clipnorm=1)
        opt_small = Adam(0.0002, 0.5, clipnorm=1)

        inputs_real = [position_input, img_ident_input]
        inputs_fake = [position_input, latent_input]

        # main pieces
        if not os.path.isfile('generator.h5'):
            img_ident_layer = Dense(LATENT_SPACE, activation='tanh')(img_ident_input)
            self.ident = Model(img_ident_input, img_ident_layer, name='IDENT')
            #plot_model(self.ident, to_file='ident.png', show_shapes=True)
            
            self.generator = self.build_generator()
            #plot_model(self.generator, to_file='generator.png', show_shapes=True)
            
            self.discriminator = self.build_discriminator()
            #plot_model(self.discriminator, to_file='discriminator.png', show_shapes=True)
        else:
            self.discriminator = load_model('discriminator.h5')
            self.generator = load_model('generator.h5')
            self.ident = load_model('ident.h5')
        
        
        
        self.ident.trainable = True
        self.generator.trainable = True
        self.generator.compile(loss='mse', optimizer=optimizer)
        self.discriminator.trainable = False
        
        self.generator_real_t = self.generator([position_input, self.ident([img_ident_input])])[0]  # train ident -> pixel as a normal model
        self.generator_real = Model(inputs_real, self.generator_real_t, name='generator_real')
        self.generator_real.compile(loss='mse', optimizer=optimizer)
        #plot_model(self.generator_real, to_file='generator_real.png', show_shapes=True)

        self.generator_fake_t = self.discriminator(self.generator(inputs_fake)[1])  # train noise -> 1 on the discriminator
        self.generator_fake = Model(inputs_fake, self.generator_fake_t, name='generator_fake')
        self.generator_fake.compile(loss='binary_crossentropy', optimizer=opt_small)
        #plot_model(self.generator_fake, to_file='generator_fake.png', show_shapes=True)
        
        
        
        
        self.ident.trainable = False
        self.generator.trainable = False
        self.discriminator.trainable = True

        self.discriminator_real_t = self.discriminator(self.generator([position_input, self.ident([img_ident_input])])[1])  # train the discriminator to assign ident -> 1
        self.discriminator_real = Model(inputs_real, self.discriminator_real_t, name='discriminator_real')
        self.discriminator_real.compile(loss='binary_crossentropy', optimizer=opt_small)
        #plot_model(self.discriminator_real, to_file='discriminator_real.png', show_shapes=True)

        
        self.discriminator_fake_t = self.discriminator(self.generator(inputs_fake)[1])  # train the discriminator to assign noise -> 0
        self.discriminator_fake = Model(inputs_fake, self.discriminator_fake_t, name='discriminator_fake')
        self.discriminator_fake.compile(loss='binary_crossentropy', optimizer=opt_small)
        #plot_model(self.discriminator_fake, to_file='discriminator_fake.png', show_shapes=True)
        

    # Do not use Batch Normalization anywhere: it harms the discriminator's ability
    # to distinguish good and bad samples and, as a result, breaks the generator.
Example #14
Source File: imdb_bidirectional_lstm.py    From MTSAnomalyDetection with Apache License 2.0
def main(args):
    # max_features, maxlen and batch_size are module-level constants defined
    # elsewhere in the source file.
    print('Loading data...')
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
    print(len(x_train), 'train sequences')
    print(len(x_test), 'test sequences')

    print("Pad sequences (samples x time)")
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

    y_train = np.array(y_train)
    y_test = np.array(y_test)

    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)

    if os.path.exists('blstm.h5'):
        model = load_model('blstm.h5')
    else:
        model = Sequential()
        model.add(Embedding(max_features, 128, input_length=maxlen))
        model.add(Bidirectional(LSTM(64)))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))

        # try using different optimizers and different optimizer configs
        model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

        print('Train...')
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=4,
                  validation_data=[x_test, y_test], verbose=2)

        model.save('blstm.h5')

    # plot_model(model)
    pred_y = model.predict(x_test)

    plt.figure()
    plt.plot(y_test, 'g')
    plt.plot(pred_y, 'r--')
    plt.show()