Python keras.losses.categorical_crossentropy() Examples

The following code examples show how to use keras.losses.categorical_crossentropy(). They are extracted from open source projects; you can go to the original project or source file by following the links above each example.


You may also want to check out all available functions and classes of the module keras.losses.
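
As a quick orientation, here is a minimal sketch of calling the loss directly (the tensor values are illustrative): categorical_crossentropy takes one-hot targets and predicted probabilities, and returns one loss value per sample.

import numpy as np
from keras import backend as K
from keras.losses import categorical_crossentropy

y_true = K.constant(np.array([[0., 1., 0.], [0., 0., 1.]]))  # one-hot targets
y_pred = K.constant(np.array([[0.1, 0.8, 0.1], [0.2, 0.3, 0.5]]))  # predicted probabilities
loss = categorical_crossentropy(y_true, y_pred)  # shape (2,): one loss per sample
print(K.eval(loss))  # approximately [0.223, 0.693], i.e. -log(0.8) and -log(0.5)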

Example 1
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    License: MIT License
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # normalize to [0, 1]
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a user-defined constant initializer; constant(value=1.) is equivalent to one()
    # build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 2
def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3):
    clf = Sequential()
    clf.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Conv3D(64, (5, 5, 16)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(MaxPooling3D(pool_size=(2, 2, 1)))
    clf.add(Flatten())
    clf.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(lr=lr), metrics=['accuracy'])
    return clf 
Example 3
Project: keras-contrib   Author: keras-team   File: crf_losses.py    License: MIT License
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        If the CRF layer is being trained in the join mode, returns the negative
        log-likelihood. Otherwise returns the categorical crossentropy implemented
        by the underlying Keras backend.

    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred) 
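
For context, here is a minimal sketch of how this loss is typically wired to keras-contrib's CRF layer (the vocabulary and tag sizes are illustrative assumptions, not from the project above):

from keras.models import Sequential
from keras.layers import Embedding
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss

vocab_size, n_tags = 5000, 10  # illustrative sizes
model = Sequential()
model.add(Embedding(vocab_size, 64, mask_zero=True))
crf = CRF(n_tags)  # learn_mode defaults to 'join', so crf_loss falls back to crf_nll
model.add(crf)
model.compile(optimizer='adam', loss=crf_loss, metrics=[crf.accuracy])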
Example 4
Project: nlp_xiaojiang   Author: yongzhuo   File: keras_bert_layer.py    License: MIT License
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.
    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.
    # Returns
        If the CRF layer is being trained in the join mode, returns the negative
        log-likelihood. Otherwise returns the categorical crossentropy implemented
        by the underlying Keras backend.
    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)

# crf_marginal_accuracy, crf_viterbi_accuracy 
Example 5
Project: talos   Author: autonomio   File: params.py    License: MIT License
def iris():

    from keras.optimizers import Adam, Nadam
    from keras.losses import logcosh, categorical_crossentropy
    from keras.activations import relu, elu, softmax

    # here use a standard 2d dictionary for inputting the param boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2, 3, 4],
         'batch_size': (2, 30, 10),
         'epochs': [2],
         'dropout': (0, 0.5, 5),
         'weight_regulizer': [None],
         'emb_output_dims':  [None],
         'shapes': ['brick', 'triangle', 0.2],
         'optimizer': [Adam, Nadam],
         'losses': [logcosh, categorical_crossentropy],
         'activation': [relu, elu],
         'last_activation': [softmax]}

    return p 
Example 6
Project: Federated-Learning-Mini-Framework   Author: gaborvecsei   File: models.py    License: MIT License
def create_model(input_shape: tuple, nb_classes: int, init_with_imagenet: bool = False, learning_rate: float = 0.01):
    weights = None
    if init_with_imagenet:
        weights = "imagenet"

    model = VGG16(input_shape=input_shape,
                  classes=nb_classes,
                  weights=weights,
                  include_top=False)
    # "Shallow" VGG for Cifar10
    x = model.get_layer('block3_pool').output
    x = layers.Flatten(name='Flatten')(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dense(nb_classes)(x)
    x = layers.Softmax()(x)
    model = models.Model(model.input, x)

    loss = losses.categorical_crossentropy
    optimizer = optimizers.SGD(lr=learning_rate, decay=0.99)

    model.compile(optimizer, loss, metrics=["accuracy"])
    return model 
Example 7
Project: EmoPy   Author: thoughtworksarts   File: neuralnets.py    License: GNU Affero General Public License v3.0
def _init_model(self):
        """
        Initialize the base model from Keras and add top layers to match the number of training emotion labels.
        :return:
        """
        base_model = self._get_base_model()

        top_layer_model = base_model.output
        top_layer_model = GlobalAveragePooling2D()(top_layer_model)
        top_layer_model = Dense(1024, activation='relu')(top_layer_model)
        prediction_layer = Dense(units=len(self.emotion_map.keys()), activation='softmax')(top_layer_model)

        model = Model(inputs=base_model.input, outputs=prediction_layer)
        print(model.summary())
        for layer in base_model.layers:
            layer.trainable = False
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

        self.model = model 
Example 8
Project: EmoPy   Author: thoughtworksarts   File: neuralnets.py    License: GNU Affero General Public License v3.0
def fit(self, features, labels, validation_split, epochs=50):
        """
        Trains the neural net on the data provided.

        :param features: Numpy array of training data.
        :param labels: Numpy array of target (label) data.
        :param validation_split: Float between 0 and 1. Fraction of the training data to use for validation.
        :param epochs: Maximum number of passes over the dataset during training.
        """
        self.model.fit(x=features, y=labels, epochs=epochs, verbose=1,
                       callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)], validation_split=validation_split,
                       shuffle=True)

        for layer in self.model.layers[:self._NUM_BOTTOM_LAYERS_TO_RETRAIN]:
            layer.trainable = False
        for layer in self.model.layers[self._NUM_BOTTOM_LAYERS_TO_RETRAIN:]:
            layer.trainable = True

        self.model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
        self.model.fit(x=features, y=labels, epochs=50, verbose=1,
                       callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)], validation_split=validation_split,
                       shuffle=True) 
Example 9
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    License: MIT License
def cnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # normalize to [0, 1]
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        # input_shape: shape of the input plane; only set on the first layer
        # filters: number of convolution kernels (filters)
        # kernel_size: size of the convolution kernel
        # strides: stride of the convolution
        # padding: one of two modes, 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten to a 1-D vector
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 10
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    License: MIT License
def rnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # normalize to [0, 1]
    x_train = x_train / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        SimpleRNN(units=50, input_shape=(28, 28)),
        Dense(units=10, activation=softmax),
    ])
    opt = RMSprop(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 11
Project: image-segmentation-keras   Author: divamgupta   File: train.py    License: MIT License
def masked_categorical_crossentropy(gt, pr):
    from keras.losses import categorical_crossentropy
    mask = 1 - gt[:, :, 0]
    return categorical_crossentropy(gt, pr) * mask 
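
Because Keras accepts any callable with the signature (y_true, y_pred) as a loss, this masked variant can be passed straight to compile. A hypothetical sketch (the toy model and shapes are illustrative; ground truth is expected as (batch, pixels, classes), with class 0 marking pixels to ignore):

from keras.models import Sequential
from keras.layers import Dense

# Toy per-pixel classifier: 1024 flattened pixels with 16 features each,
# 4 classes where class 0 means "unlabeled / ignore".
model = Sequential([Dense(4, activation='softmax', input_shape=(1024, 16))])
model.compile(optimizer='adam', loss=masked_categorical_crossentropy)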
Example 12
Project: keras-gcnn   Author: basveeling   File: test_model_saving.py    License: MIT License
def test_functional_model_saving():
    img_rows, img_cols = 32, 32
    img_channels = 3

    # Parameters for the DenseNet model builder
    img_dim = (img_channels, img_rows, img_cols) if K.image_data_format() == 'channels_first' else (
        img_rows, img_cols, img_channels)
    depth = 40
    nb_dense_block = 3
    growth_rate = 3  # number of z2 maps equals growth_rate * group_size, so keep this small.
    nb_filter = 16
    dropout_rate = 0.0  # 0.0 for data augmentation
    conv_group = 'D4'  # C4 includes 90 degree rotations, D4 additionally includes reflections in x and y axis.
    use_gcnn = True

    # Create the model (without loading weights)
    model = GDenseNet(mc_dropout=False, padding='same', nb_dense_block=nb_dense_block, growth_rate=growth_rate,
                      nb_filter=nb_filter, dropout_rate=dropout_rate, weights=None, input_shape=img_dim, depth=depth,
                      use_gcnn=use_gcnn, conv_group=conv_group)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 32, 32, 3))
    y = np.random.randint(0, 10, 1)
    y = np_utils.to_categorical(y, 10)
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example 13
def get_model_compiled(shapeinput, num_class, w_decay=0):
    clf = Sequential()
    clf.add(Conv2D(50, kernel_size=(5, 5), input_shape=shapeinput))
    clf.add(Activation('relu'))
    clf.add(Conv2D(100, (5, 5)))
    clf.add(Activation('relu'))
    clf.add(MaxPooling2D(pool_size=(2, 2)))
    clf.add(Flatten())
    clf.add(Dense(100, kernel_regularizer=regularizers.l2(w_decay)))
    clf.add(Activation('relu'))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
    return clf 
Example 14
def get_model_compiled(bands, num_class):
    clf = Sequential()
    clf.add(Conv1D(20, (24), activation='relu', input_shape=(bands,1)))
    clf.add(MaxPooling1D(pool_size=5))
    clf.add(Flatten())
    clf.add(Dense(100))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
    return clf 
Example 15
def get_model_compiled(feat_size, seq_len, num_class, type_func):
    if type_func == "RNN": func = SimpleRNN
    elif type_func == "GRU": func = CuDNNGRU
    elif type_func == "LSTM": func = CuDNNLSTM
    else: raise ValueError("Unsupported recurrent type: " + str(type_func))
    clf = Sequential()
    clf.add(func(64, return_sequences=True, input_shape=(feat_size, seq_len)))
    clf.add(func(64, return_sequences=True))
    clf.add(Flatten())
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
    return clf 
Example 16
def get_model_compiled(args, inputshape, num_class):
    model = Sequential()
    if args.arch == "CNN1D":
        model.add(Conv1D(20, (24), activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(100))
    elif args.arch == "CNN3D":
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    if args.arch != "CNN2D": model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy, optimizer=Adam(args.lr1), metrics=['accuracy']) 
    return model 
Example 17
def get_model_compiled(n_bands, num_class):
    clf = Sequential()
    clf.add(Dense(int(n_bands*2/3.)+10, activation='relu', input_shape=(n_bands,)))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
    return clf 
Example 18
Project: coremltools   Author: apple   File: test_keras2.py    License: BSD 3-Clause "New" or "Revised" License
def test_updatable_model_flag_off(self):
        """
        Test to ensure that when respect_trainable is off, then we will ignore
        any 'trainable' layers of the original network.
        """
        import coremltools
        from keras.layers import Dense
        from keras.losses import categorical_crossentropy
        from keras.optimizers import SGD

        input = ["data"]
        output = ["output"]
        # First, set respect_trainable to False and then check to make sure the
        # converted model is NOT updatable.
        not_updatable = Sequential()
        not_updatable.add(Dense(128, input_shape=(16,)))
        # layer is updatable, but the flag during convert is false, so that bit
        # must get dropped on the floor.
        not_updatable.add(Dense(10, name="foo", activation="softmax", trainable=True))
        not_updatable.compile(
            loss=categorical_crossentropy, optimizer=SGD(lr=0.01), metrics=["accuracy"]
        )
        cml = coremltools.converters.keras.convert(
            not_updatable, input, output, respect_trainable=False
        )
        spec = cml.get_spec()
        self.assertFalse(spec.isUpdatable)
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[1].innerProduct)
        self.assertTrue(layers[1].innerProduct)
        self.assertFalse(layers[1].isUpdatable) 
Example 19
Project: coremltools   Author: apple   File: test_keras2.py    License: BSD 3-Clause "New" or "Revised" License
def test_updatable_model_flag_cce_sgd(self):
        """
        Test to ensure that respect_trainable is honored during convert of a
        model with categorical cross entropy loss and SGD optimizer.
        """
        import coremltools
        from keras.layers import Dense
        from keras.losses import categorical_crossentropy
        from keras.optimizers import SGD

        input = ["data"]
        output = ["output"]

        # This should result in an updatable model.
        updatable = Sequential()
        updatable.add(Dense(128, input_shape=(16,)))
        updatable.add(Dense(10, name="foo", activation="softmax", trainable=True))
        updatable.compile(
            loss=categorical_crossentropy, optimizer=SGD(lr=1.0), metrics=["accuracy"]
        )
        cml = coremltools.converters.keras.convert(
            updatable, input, output, respect_trainable=True
        )
        spec = cml.get_spec()
        self.assertTrue(spec.isUpdatable)
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[1].innerProduct)
        self.assertTrue(layers[1].innerProduct)
        self.assertTrue(layers[1].isUpdatable)
        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
        sgdopt = spec.neuralNetwork.updateParams.optimizer.sgdOptimizer
        self.assertEqual(sgdopt.learningRate.defaultValue, 1.0)
        self.assertEqual(sgdopt.miniBatchSize.defaultValue, 16)
        self.assertEqual(sgdopt.momentum.defaultValue, 0.0) 
Example 20
Project: coremltools   Author: apple   File: test_keras2.py    License: BSD 3-Clause "New" or "Revised" License
def test_updatable_model_flag_functional(self):
        """
        Test to ensure that respect_trainable is honored during convert of a
        Keras model defined via the Keras functional API.
        """
        import coremltools
        from keras.layers import Dense, Input
        from keras.losses import categorical_crossentropy
        from keras.optimizers import SGD

        input = ["data"]
        output = ["output"]

        # This should result in an updatable model.
        inputs = Input(shape=(16,))
        d1 = Dense(128)(inputs)
        d2 = Dense(10, name="foo", activation="softmax", trainable=True)(d1)
        kmodel = Model(inputs=inputs, outputs=d2)
        kmodel.compile(
            loss=categorical_crossentropy, optimizer=SGD(lr=1.0), metrics=["accuracy"]
        )
        cml = coremltools.converters.keras.convert(
            kmodel, input, output, respect_trainable=True
        )
        spec = cml.get_spec()
        self.assertTrue(spec.isUpdatable)
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[1].innerProduct)
        self.assertTrue(layers[1].innerProduct)
        self.assertTrue(layers[1].isUpdatable)
        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
        sgdopt = spec.neuralNetwork.updateParams.optimizer.sgdOptimizer
        self.assertEqual(sgdopt.learningRate.defaultValue, 1.0)
        self.assertEqual(sgdopt.miniBatchSize.defaultValue, 16)
        self.assertEqual(sgdopt.momentum.defaultValue, 0.0) 
Example 21
Project: coremltools   Author: apple   File: test_keras2.py    License: BSD 3-Clause "New" or "Revised" License
def test_updatable_model_flag_cce_sgd_string(self):
        """
        Tests that the 'respect_trainable' flag works when a string is passed
        for the optimizer (Keras internally creates an instance, here SGD)
        and that conversion succeeds.
        """
        import coremltools
        from keras.layers import Dense, Input
        from keras.losses import categorical_crossentropy

        input = ["data"]
        output = ["output"]

        # This should result in an updatable model.
        inputs = Input(shape=(16,))
        d1 = Dense(128)(inputs)
        d2 = Dense(10, name="foo", activation="softmax", trainable=True)(d1)
        kmodel = Model(inputs=inputs, outputs=d2)
        kmodel.compile(
            loss=categorical_crossentropy, optimizer="sgd", metrics=["accuracy"]
        )
        cml = coremltools.converters.keras.convert(
            kmodel, input, output, respect_trainable=True
        )
        spec = cml.get_spec()
        self.assertTrue(spec.isUpdatable)
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[1].innerProduct)
        self.assertTrue(layers[1].innerProduct)
        self.assertTrue(layers[1].isUpdatable)
        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
        sgdopt = spec.neuralNetwork.updateParams.optimizer.sgdOptimizer
        # use almost equal for default verification with at least 5 decimal
        # places of closeness
        self.assertAlmostEqual(sgdopt.learningRate.defaultValue, 0.01, places=5)
        self.assertEqual(sgdopt.miniBatchSize.defaultValue, 16)
        self.assertEqual(sgdopt.momentum.defaultValue, 0.0) 
Example 22
Project: coremltools   Author: apple   File: test_keras2.py    License: BSD 3-Clause "New" or "Revised" License
def test_updatable_model_flag_cce_adam_string(self):
        """
        Tests that the 'respect_trainable' flag works when a string is passed
        for the optimizer (Keras internally creates an instance, here Adam)
        and that conversion succeeds.
        """
        import coremltools
        from keras.layers import Dense, Input
        from keras.losses import categorical_crossentropy

        input = ["data"]
        output = ["output"]

        # This should result in an updatable model.
        inputs = Input(shape=(16,))
        d1 = Dense(128)(inputs)
        d2 = Dense(10, name="foo", activation="softmax", trainable=True)(d1)
        kmodel = Model(inputs=inputs, outputs=d2)
        kmodel.compile(
            loss=categorical_crossentropy, optimizer="adam", metrics=["accuracy"]
        )
        cml = coremltools.converters.keras.convert(
            kmodel, input, output, respect_trainable=True
        )
        spec = cml.get_spec()
        self.assertTrue(spec.isUpdatable)
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[1].innerProduct)
        self.assertTrue(layers[1].innerProduct)
        self.assertTrue(layers[1].isUpdatable)
        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
        adopt = spec.neuralNetwork.updateParams.optimizer.adamOptimizer
        # use almost equal for default verification with at least 5 decimal
        # places of closeness
        self.assertAlmostEqual(adopt.learningRate.defaultValue, 0.001, places=5)
        self.assertAlmostEqual(adopt.miniBatchSize.defaultValue, 16)
        self.assertAlmostEqual(adopt.beta1.defaultValue, 0.90, places=5)
        self.assertAlmostEqual(adopt.beta2.defaultValue, 0.999, places=5) 
Example 23
Project: DiscriminativeActiveLearning   Author: dsgissin   File: query_methods.py    License: MIT License
def compute_egls(self, unlabeled, n_classes):

        # create a function for computing the gradient length:
        self.input_placeholder = K.placeholder(self.model.get_input_shape_at(0))
        self.output_placeholder = K.placeholder(self.model.get_output_shape_at(0))
        predict = self.model.call(self.input_placeholder)
        loss = K.mean(categorical_crossentropy(self.output_placeholder, predict))
        weights = [tensor for tensor in self.model.trainable_weights]
        gradient = self.model.optimizer.get_gradients(loss, weights)
        gradient_flat = [K.flatten(x) for x in gradient]
        gradient_flat = K.concatenate(gradient_flat)
        gradient_length = K.sum(K.square(gradient_flat))
        self.get_gradient_length = K.function([K.learning_phase(), self.input_placeholder, self.output_placeholder], [gradient_length])

        # calculate the expected gradient length of the unlabeled set (iteratively, to avoid memory issues):
        unlabeled_predictions = self.model.predict(unlabeled)
        egls = np.zeros(unlabeled.shape[0])
        for i in range(n_classes):
            calculated_so_far = 0
            while calculated_so_far < unlabeled_predictions.shape[0]:
                if calculated_so_far + 100 >= unlabeled_predictions.shape[0]:
                    next = unlabeled_predictions.shape[0] - calculated_so_far
                else:
                    next = 100

                labels = np.zeros((next, n_classes))
                labels[:,i] = 1
                grads = self.get_gradient_length([0, unlabeled[calculated_so_far:calculated_so_far+next, :], labels])[0]
                grads *= unlabeled_predictions[calculated_so_far:calculated_so_far+next, i]
                egls[calculated_so_far:calculated_so_far+next] += grads

                calculated_so_far += next

        return egls 
Example 24
Project: image-segmentation   Author: nearthlab   File: semantic_model_wrapper.py    License: MIT License
def cce_loss_graph(gt, pr):
    return K.mean(categorical_crossentropy(gt, pr)) 
Example 25
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_training.py    License: MIT License
def test_weighted_masked_objective():
    a = Input(shape=(3,), name='input_a')

    # weighted_masked_objective
    def mask_dummy(y_true=None, y_pred=None, weight=None):
        return K.placeholder(y_true.shape)

    weighted_function = _weighted_masked_objective(losses.categorical_crossentropy)
    weighted_function(a, a, None) 
Example 26
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_training.py    License: MIT License
def test_check_not_failing():
    a = np.random.random((2, 1, 3))
    _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [a.shape])
    _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [(2, None, 3)]) 
Example 27
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_training.py    License: MIT License
def test_check_last_is_one():
    a = np.random.random((2, 3, 1))
    with pytest.raises(ValueError) as exc:
        _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [a.shape])

    assert 'You are passing a target array' in str(exc) 
Example 28
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_training.py    License: MIT License
def test_check_bad_shape():
    a = np.random.random((2, 3, 5))
    with pytest.raises(ValueError) as exc:
        _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [(2, 3, 6)])

    assert 'targets to have the same shape' in str(exc) 