Python keras.metrics.categorical_accuracy() Examples

The following are code examples of keras.metrics.categorical_accuracy(), collected from open-source projects. The source project, author, file, and license are noted above each example so you can locate the original code. You may also want to check out all available functions and classes of the module keras.metrics.
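Before diving into the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what categorical_accuracy actually computes: it compares the argmax of the one-hot labels with the argmax of the predictions and returns a per-sample 0/1 tensor. The array values are illustrative only.

import numpy as np
from keras import backend as K
from keras import metrics

# One-hot ground truth for two samples over three classes
y_true = K.constant(np.array([[0., 0., 1.],
                              [0., 1., 0.]]))
# Predicted probabilities: the first sample is classified correctly, the second is not
y_pred = K.constant(np.array([[0.1, 0.2, 0.7],
                              [0.6, 0.3, 0.1]]))

acc = metrics.categorical_accuracy(y_true, y_pred)
print(K.eval(acc))  # -> [1. 0.]
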
Example #1
Source Project: plastering   Author: plastering   File: ir2tagsets_seq.py    License: MIT License
def fit_new(self, x, y=None):
        timesteps = x.shape[1]
        input_dim = x.shape[2]
        self.ae = Sequential()
        self.ae.add(Dense(self.latent_dim,
                    input_shape=(timesteps,input_dim,),
                    activation='relu',
                    name='enc'))
        self.ae.add(Dropout(0.2))
        self.ae.add(Dense(input_dim,
                    activation='softmax',
                    name='dec'))

        self.encoder = Model(inputs=self.ae.input,
                             outputs=self.ae.get_layer('enc').output)
        #rmsprop = RMSprop(lr=0.05)
        self.ae.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'],)
        self.ae.fit(x, x, epochs=1) 
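Note that this example refers to the metric by its string name, 'categorical_accuracy', while most of the examples below pass the function object metrics.categorical_accuracy instead. Both forms are accepted by compile; a generic sketch, assuming an already-built Keras model (not taken from any particular project):

from keras import metrics

model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['categorical_accuracy'])           # by name
# or, equivalently
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=[metrics.categorical_accuracy])     # by reference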
Example #2
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_model_saving.py    License: MIT License
def test_functional_model_saving():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #3
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_model_saving.py    License: MIT License
def test_saving_lambda_custom_objects():
    inputs = Input(shape=(3,))
    x = Lambda(lambda x: square_fn(x), output_shape=(3,))(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'square_fn': square_fn})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #4
Source Project: deep-spell-checkr   Author: vuptran   File: model.py    License: MIT License
def truncated_acc(y_true, y_pred):
    y_true = y_true[:, :VAL_MAXLEN, :]
    y_pred = y_pred[:, :VAL_MAXLEN, :]
    
    acc = metrics.categorical_accuracy(y_true, y_pred)
    return K.mean(acc, axis=-1) 
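truncated_acc only defines the metric tensor; to track it during training it has to be passed to compile by reference rather than by name. A minimal sketch, assuming an already-built model (the loss and optimizer here are illustrative, not taken from the deep-spell-checkr project):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[truncated_acc])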
Example #5
Source Project: keras-gcnn   Author: basveeling   File: test_model_saving.py    License: MIT License
def test_functional_model_saving():
    img_rows, img_cols = 32, 32
    img_channels = 3

    # Parameters for the DenseNet model builder
    img_dim = (img_channels, img_rows, img_cols) if K.image_data_format() == 'channels_first' else (
        img_rows, img_cols, img_channels)
    depth = 40
    nb_dense_block = 3
    growth_rate = 3  # number of z2 maps equals growth_rate * group_size, so keep this small.
    nb_filter = 16
    dropout_rate = 0.0  # 0.0 for data augmentation
    conv_group = 'D4'  # C4 includes 90 degree rotations, D4 additionally includes reflections in x and y axis.
    use_gcnn = True

    # Create the model (without loading weights)
    model = GDenseNet(mc_dropout=False, padding='same', nb_dense_block=nb_dense_block, growth_rate=growth_rate,
                      nb_filter=nb_filter, dropout_rate=dropout_rate, weights=None, input_shape=img_dim, depth=depth,
                      use_gcnn=use_gcnn, conv_group=conv_group)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 32, 32, 3))
    y = np.random.randint(0, 10, 1)
    y = np_utils.to_categorical(y, 10)
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #6
Source Project: Pseudo-Label-Keras   Author: koshian2   File: mobilenet_pseudo_cifar.py    License: MIT License
def accuracy(self, y_true, y_pred):
        y_true_item = y_true[:, :self.n_classes]
        return categorical_accuracy(y_true_item, y_pred) 
Example #7
Source Project: Pseudo-Label-Keras   Author: koshian2   File: pseudo_pretrain_cifar.py    License: MIT License
def accuracy(self, y_true, y_pred):
        y_true_item = y_true[:, :self.n_classes]
        return categorical_accuracy(y_true_item, y_pred) 
Example #8
Source Project: Pseudo-Label-Keras   Author: koshian2   File: mobilenet_transfer_pseudo_cifar.py    License: MIT License
def accuracy(self, y_true, y_pred):
        y_true_item = y_true[:, :self.n_classes]
        return categorical_accuracy(y_true_item, y_pred) 
Example #9
Source Project: Pseudo-Label-Keras   Author: koshian2   File: pseudo_cifar.py    License: MIT License
def accuracy(self, y_true, y_pred):
        y_true_item = y_true[:, :self.n_classes]
        return categorical_accuracy(y_true_item, y_pred) 
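In the four Pseudo-Label-Keras examples above, y_true apparently carries extra columns beyond the one-hot class block, so the metric first slices out the leading n_classes columns and then delegates to categorical_accuracy. The same idea as a standalone function (a sketch; N_CLASSES = 10 is an assumed value, not taken from the project):

from keras import metrics

N_CLASSES = 10  # assumed number of classes, for illustration only

def pseudo_label_accuracy(y_true, y_pred):
    # Keep only the one-hot class columns of y_true before computing accuracy
    y_true_classes = y_true[:, :N_CLASSES]
    return metrics.categorical_accuracy(y_true_classes, y_pred)

# Usage (assuming an already-built model):
# model.compile(optimizer='adam', loss='categorical_crossentropy',
#               metrics=[pseudo_label_accuracy])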
Example #10
Source Project: plastering   Author: plastering   File: ir2tagsets_seq.py    License: MIT License
def fit(self, x, y=None):
        timesteps = x.shape[1]
        input_dim = x.shape[2]
        self.ae = Sequential()
        #m.add(LSTM(latent_dim, input_dim=in_dim, return_sequences=True, name='enc'), )
        self.ae.add(LSTM(self.latent_dim,
                         activation='softsign',
                         input_shape=(timesteps,input_dim,),
                         return_sequences=True,
                         unroll=True,
                         name='enc'), )
        self.ae.add(LSTM(input_dim,
                         activation='softsign',
                         return_sequences=True,
                         unroll=True,
                         name='dec',
                         ))
        self.ae.add(Activation('softmax'))

        self.encoder = Model(inputs=self.ae.input,
                             outputs=self.ae.get_layer('enc').output)
        rmsprop = RMSprop(lr=0.005)
        self.ae.compile(loss='categorical_hinge',
                  optimizer=rmsprop,
                  metrics=['categorical_accuracy', 'binary_accuracy'],)
        self.ae.fit(x, x, epochs=1) 
Example #11
Source Project: Facial-Expression-Recognition   Author: rishabhjainps   File: cnn_major_shallow.py    License: MIT License
def baseline_model():
    # Initialising the CNN
    model = Sequential()

    # 1 - Convolution
    model.add(Conv2D(64,(3,3), border_mode='same', input_shape=(48, 48,1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # 2nd Convolution layer
    model.add(Conv2D(128,(5,5), border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))


    # Flattening
    model.add(Flatten())

    # Fully connected layer 1st layer
    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))

    model.add(Dense(num_class, activation='sigmoid'))

    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[categorical_accuracy])
    return model 
Example #12
Source Project: Facial-Expression-Recognition   Author: rishabhjainps   File: cnn_major_shallow.py    License: MIT License
def baseline_model_saved():
    #load json and create model
    json_file = open('model_2layer_2_2_pool.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    #load weights from h5 file
    model.load_weights("model_2layer_2_2_pool.h5")
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[categorical_accuracy])
    return model 
Example #13
Source Project: Facial-Expression-Recognition   Author: rishabhjainps   File: cnn_major.py    License: MIT License
def baseline_model_saved():
    #load json and create model
    json_file = open('model_4layer_2_2_pool.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    #load weights from h5 file
    model.load_weights("model_4layer_2_2_pool.h5")
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[categorical_accuracy])
    return model 
Example #14
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_model_saving.py    License: MIT License
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #15
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_model_saving.py    License: MIT License
def test_model_saving_to_pre_created_h5py_file():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    with h5py.File(fname, mode='r+') as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test non-default options in h5
    with h5py.File('does not matter', driver='core',
                   backing_store=False) as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #16
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_model_saving.py    License: MIT License
def test_model_saving_to_binary_stream():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    with h5py.File(fname, mode='r+') as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # Save the model to an in-memory-only h5 file.
    with h5py.File('does not matter', driver='core',
                   backing_store=False) as h5file:
        save_model(model, h5file)
        h5file.flush()  # Very important! Otherwise you get all zeroes below.
        binary_data = h5file.fid.get_file_image()

        # Make sure the binary data is correct by saving it to a file manually
        # and then loading it the usual way.
        with open(fname, 'wb') as raw_file:
            raw_file.write(binary_data)

    # Load the manually-saved binary data, and make sure the model is intact.
    with h5py.File(fname, mode='r') as h5file:
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)

    assert_allclose(out, out2, atol=1e-05)