Python tensorflow.keras.callbacks.TensorBoard() Examples

The following are 3 code examples of tensorflow.keras.callbacks.TensorBoard(). Each example is taken from an open-source project; the source file and license are noted above the code. You may also want to check out the other functions and classes available in the tensorflow.keras.callbacks module.
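
Before looking at the project examples, here is a minimal, self-contained sketch of the typical pattern: create a tensorflow.keras.callbacks.TensorBoard callback pointing at a timestamped log directory and pass it to model.fit(). The toy model and random data below are placeholders for illustration only, not taken from the examples that follow.

import datetime

import tensorflow as tf

# Toy binary classifier; any compiled Keras model works the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation="relu", input_shape=(20,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# Timestamped log directory so successive runs do not overwrite each other.
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

# Placeholder random data standing in for a real dataset.
x = tf.random.normal((256, 20))
y = tf.cast(tf.random.uniform((256, 1)) > 0.5, tf.float32)
model.fit(x, y, epochs=5, validation_split=0.2, callbacks=[tensorboard_cb])

The resulting logs can then be inspected with the TensorBoard CLI, e.g. tensorboard --logdir logs/fit.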
Example #1
Source File: tcn.py    From TF.Keras-Commonly-used-models with Apache License 2.0
# Imports inferred from usage; build_model() and load_dataset() are defined
# elsewhere in tcn.py.
from datetime import datetime
from pathlib import Path

from tensorflow.keras import losses, metrics
from tensorflow.keras.callbacks import TensorBoard


def train():
    depth = 6
    filters = 25
    block_filters = [filters] * depth
    print(block_filters)
    model = build_model(sequence_length=28 * 28,
                        channels=1,
                        num_classes=10,
                        filters=block_filters,
                        kernel_size=8)

    model.compile(optimizer="Adam",
                  metrics=[metrics.SparseCategoricalAccuracy()],
                  loss=losses.SparseCategoricalCrossentropy())

    print(model.summary())

    # Dataset loading and the actual fit call are left disabled in the source:
    # train_dataset, test_dataset = load_dataset()
    """
    model.fit(train_dataset.batch(32),
              validation_data=test_dataset.batch(32),
              callbacks=[TensorBoard(str(Path("logs") / datetime.now().strftime("%Y-%m-%dT%H-%M_%S")))],
              epochs=10)
    """
Example #2
Source File: siamese_similarity.py    From nlp-journey with Apache License 2.0
# This is a method of the siamese similarity model class; EarlyStopping,
# ModelCheckpoint and TensorBoard come from tensorflow.keras.callbacks.
def train(self, weights_only=True, call_back=False):
    model = self._build_model()

    if call_back:
        # Stop early on a stalled validation loss, checkpoint the best model,
        # and write training curves to a per-run TensorBoard log directory.
        early_stopping = EarlyStopping(monitor='val_loss', patience=30)
        stamp = 'lstm_%d' % self.n_hidden
        checkpoint_dir = os.path.join(
            self.model_path, 'checkpoints/' + str(int(time.time())) + '/')
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        bst_model_path = checkpoint_dir + stamp + '.h5'
        if weights_only:
            model_checkpoint = ModelCheckpoint(
                bst_model_path, save_best_only=True, save_weights_only=True)
        else:
            model_checkpoint = ModelCheckpoint(
                bst_model_path, save_best_only=True)
        tensor_board = TensorBoard(
            log_dir=checkpoint_dir + "logs/{}".format(time.time()))
        callbacks = [early_stopping, model_checkpoint, tensor_board]
    else:
        callbacks = None
    model_trained = model.fit([self.x_train['left'], self.x_train['right']],
                              self.y_train,
                              batch_size=self.batch_size,
                              epochs=self.epochs,
                              validation_data=([self.x_val['left'], self.x_val['right']], self.y_val),
                              verbose=1,
                              callbacks=callbacks)
    # Without callbacks there is no checkpointing, so save the trained model
    # (or just its weights) explicitly.
    if weights_only and not call_back:
        model.save_weights(os.path.join(self.model_path, 'weights_only.h5'))
    elif not weights_only and not call_back:
        model.save(os.path.join(self.model_path, 'model.h5'))
    self._save_config()
    plot(model_trained)
    return model
Example #3
Source File: train.py    From keras-mobile-detectnet with MIT License
# Imports inferred from usage; MobileDetectNetModel and MobileDetectNetSequence
# are defined elsewhere in the project.
import os
import time

import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import Nadam


def main(batch_size: int = 24,
         epochs: int = 384,
         train_path: str = 'train',
         val_path: str = 'val',
         weights=None,
         workers: int = 8):

    # We use an extra input during training to discount bounding box loss when a class is not present in an image.
    discount_input = Input(shape=(7, 7), name='discount')

    keras_model = MobileDetectNetModel.complete_model(extra_inputs=[discount_input])
    keras_model.summary()

    if weights is not None:
        keras_model.load_weights(weights, by_name=True)

    train_seq = MobileDetectNetSequence(train_path, stage="train", batch_size=batch_size)
    val_seq = MobileDetectNetSequence(val_path, stage="val", batch_size=batch_size)

    callbacks = []

    def region_loss(classes):
        def loss_fn(y_true, y_pred):
            # Don't penalize bounding box errors when there is no object present
            return 10 * (classes * K.abs(y_pred[:, :, :, 0] - y_true[:, :, :, 0]) +
                         classes * K.abs(y_pred[:, :, :, 1] - y_true[:, :, :, 1]) +
                         classes * K.abs(y_pred[:, :, :, 2] - y_true[:, :, :, 2]) +
                         classes * K.abs(y_pred[:, :, :, 3] - y_true[:, :, :, 3]))

        return loss_fn

    keras_model.compile(optimizer=Nadam(lr=0.001), loss=['mean_absolute_error',
                                                         region_loss(discount_input),
                                                         'binary_crossentropy'])

    filepath = "weights-{epoch:02d}-{val_loss:.4f}-multi-gpu.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks.append(checkpoint)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001, verbose=1)
    callbacks.append(reduce_lr)

    try:
        os.mkdir('logs')
    except FileExistsError:
        pass

    tensorboard = TensorBoard(log_dir='logs/%s' % time.strftime("%Y-%m-%d_%H-%M-%S"))
    callbacks.append(tensorboard)

    keras_model.fit_generator(train_seq,
                              validation_data=val_seq,
                              epochs=epochs,
                              steps_per_epoch=np.ceil(len(train_seq) / batch_size),
                              validation_steps=np.ceil(len(val_seq) / batch_size),
                              callbacks=callbacks,
                              use_multiprocessing=True,
                              workers=workers,
                              shuffle=True)
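
Note that Model.fit_generator is deprecated in TensorFlow 2.1+, where Model.fit accepts keras.utils.Sequence objects directly and infers the number of steps from len(sequence). A sketch of the equivalent call under that assumption (not part of the original project) would be:

    keras_model.fit(train_seq,
                    validation_data=val_seq,
                    epochs=epochs,
                    callbacks=callbacks,
                    use_multiprocessing=True,
                    workers=workers,
                    shuffle=True)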