Python keras.layers.deserialize() Examples

The following are 4 code examples of keras.layers.deserialize(), drawn from open-source projects. The source project, author, file, and license are listed above each example, so you can follow each snippet back to its original context.
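
Before the project examples, here is a minimal sketch of the call itself: keras.layers.deserialize() takes a dict with a class_name and a config and returns a fresh layer instance (the Dense config below is illustrative):

from keras import layers

dense = layers.deserialize({
    'class_name': 'Dense',
    'config': {'units': 32, 'activation': 'relu'},
})
print(type(dense).__name__)  # Dense
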
Example #1
Source Project: heatmaps   Author: gabrieldemarmiesse   File: heatmap.py   License: MIT
from keras.layers import deserialize as layer_from_config

def from_config(layer, config_dic):
    # The stringified type is used both as the class_name and as the
    # custom_objects key, so deserialize() can resolve it back to the layer.
    config_correct = {}
    config_correct['class_name'] = str(type(layer))
    config_correct['config'] = config_dic
    return layer_from_config(config_correct, custom_objects={str(type(layer)): layer})
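
A sketch of how this wrapper might be called: read a layer's config, tweak it, and rebuild the layer (the Dense layer and the units change are illustrative, not taken from heatmap.py):

from keras import layers

dense = layers.Dense(10)
config = dense.get_config()
config['units'] = 20  # modify the config before rebuilding
rebuilt = from_config(dense, config)
print(rebuilt.get_config()['units'])  # 20
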
Example #2
Source Project: keras-gp   Author: alshedivat   File: assemble.py   License: MIT
import numpy as np
from keras import layers
from keras.models import Model as KerasModel  # assumed alias for keras.models.Model

def assemble_narx(params, final_reshape=True):
    """Construct a NARX model of the form: X-[H1-H2-...-HN]-Y.
    All the H-layers are Dense and optional, i.e., they are built only if
    specified in the params dictionary. Here, X is a sequence.
    """
    # Input layer
    input_shape = params['input_shape']
    inputs = layers.Input(shape=input_shape)

    # Flatten the time dimension
    target_shape = (np.prod(input_shape), )
    previous = layers.Reshape(target_shape)(inputs)

    # Hidden layers
    for layer in params['hidden_layers']:
        Layer = layers.deserialize(
            {'class_name': layer['name'], 'config': layer['config']})
        previous = Layer(previous)
        if 'dropout' in layer and layer['dropout'] is not None:
            previous = layers.Dropout(layer['dropout'])(previous)
        if 'batch_norm' in layer and layer['batch_norm'] is not None:
            previous = layers.BatchNormalization(**layer['batch_norm'])(previous)

    # Output layer
    output_shape = params['output_shape']
    output_dim = np.prod(output_shape)
    outputs = layers.Dense(output_dim)(previous)

    if final_reshape:
        outputs = layers.Reshape(output_shape)(outputs)

    return KerasModel(inputs=inputs, outputs=outputs) 
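
The params dictionary drives the whole assembly: each entry in hidden_layers names a layer class for layers.deserialize() and carries its config, plus optional dropout and batch_norm keys. A sketch of a plausible params dict (the schema is inferred from the loop above; the values are illustrative):

params = {
    'input_shape': (10, 2),   # 10 time steps, 2 features
    'output_shape': (1,),
    'hidden_layers': [
        {'name': 'Dense',
         'config': {'units': 64, 'activation': 'relu'},
         'dropout': 0.25,
         'batch_norm': None},
    ],
}
model = assemble_narx(params)
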
Example #3
Source Project: keras-gp   Author: alshedivat   File: assemble.py   License: MIT
# (Same file as Example #2, so it shares the imports shown there.)
def assemble_rnn(params, final_reshape=True):
    """Construct an RNN/LSTM/GRU model of the form: X-[H1-H2-...-HN]-Y.
    All the H-layers are recurrent and optional, i.e., they are built only
    if specified in the params dictionary.
    """
    # Input layer
    input_shape = params['input_shape']
    inputs = layers.Input(shape=input_shape)
    # inputs = layers.Input(batch_shape=[20] + list(input_shape))

    # Masking layer
    previous = layers.Masking(mask_value=0.0)(inputs)

    # Hidden layers
    for layer in params['hidden_layers']:
        Layer = layers.deserialize(
            {'class_name': layer['name'], 'config': layer['config']})
        previous = Layer(previous)
        if 'dropout' in layer and layer['dropout'] is not None:
            previous = layers.Dropout(layer['dropout'])(previous)
        if 'batch_norm' in layer and layer['batch_norm'] is not None:
            previous = layers.BatchNormalization(**layer['batch_norm'])(previous)

    # Output layer
    output_shape = params['output_shape']
    output_dim = np.prod(output_shape)
    outputs = layers.Dense(output_dim)(previous)

    if final_reshape:
        outputs = layers.Reshape(output_shape)(outputs)

    return KerasModel(inputs=inputs, outputs=outputs) 
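
For the recurrent variant, the hidden-layer entries name recurrent classes instead; any recurrent layer but the last would need return_sequences=True in its config so the next one still sees a sequence. A sketch under the same assumed schema (values illustrative):

params = {
    'input_shape': (10, 2),
    'output_shape': (1,),
    'hidden_layers': [
        {'name': 'LSTM',
         'config': {'units': 32, 'return_sequences': False},
         'dropout': None,
         'batch_norm': None},
    ],
}
model = assemble_rnn(params)
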
Example #4
Source Project: Keras-inference-time-optimizer   Author: ZFTurbo   File: __init__.py   License: MIT
def optimize_separableconv2d_batchnorm_block(m, initial_model, input_layers, conv, bn, verbose=False):
    import numpy as np
    from keras import layers
    from keras.models import Model

    conv_config = conv.get_config()
    conv_config['use_bias'] = True
    bn_config = bn.get_config()
    if conv_config['activation'] != 'linear':
        print('Only linear activation is supported for conv + bn optimization!')
        exit()

    layer_copy = layers.deserialize({'class_name': conv.__class__.__name__, 'config': conv_config})
    # We use batch norm name here to find it later
    layer_copy.name = bn.name

    # Create new model to initialize layer. We need to store other output tensors as well
    output_tensor, output_names = get_layers_without_output(m, verbose)
    input_layer_name = initial_model.layers[input_layers[0]].name
    prev_layer = m.get_layer(name=input_layer_name)
    x = layer_copy(prev_layer.output)

    output_tensor_to_use = [x]
    for i in range(len(output_names)):
        if output_names[i] != input_layer_name:
            output_tensor_to_use.append(output_tensor[i])

    if len(output_tensor_to_use) == 1:
        output_tensor_to_use = output_tensor_to_use[0]

    tmp_model = Model(inputs=m.input, outputs=output_tensor_to_use)

    if conv.get_config()['use_bias']:
        (conv_weights_3, conv_weights_1, conv_bias) = conv.get_weights()
    else:
        (conv_weights_3, conv_weights_1) = conv.get_weights()

    if bn_config['scale']:
        gamma, beta, run_mean, run_std = bn.get_weights()
    else:
        gamma = 1.0
        beta, run_mean, run_std = bn.get_weights()

    eps = bn_config['epsilon']
    # Note: despite the name, run_std holds BN's moving variance, hence the sqrt.
    A = gamma / np.sqrt(run_std + eps)

    if conv.get_config()['use_bias']:
        B = beta + (gamma * (conv_bias - run_mean) / np.sqrt(run_std + eps))
    else:
        B = beta - ((gamma * run_mean) / np.sqrt(run_std + eps))

    # Fold the per-channel BN scale into the pointwise (1x1) kernel.
    for i in range(conv_weights_1.shape[-1]):
        conv_weights_1[:, :, :, i] *= A[i]

    # print(conv_weights_3.shape, conv_weights_1.shape, A.shape)

    tmp_model.get_layer(layer_copy.name).set_weights((conv_weights_3, conv_weights_1, B))
    return tmp_model
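
The weight algebra above is the standard batch-norm fold: with A = gamma / sqrt(var + eps) and B as computed, applying batch norm after the convolution is the same as scaling the pointwise kernel by A per channel and using B as the bias. A self-contained numpy check of that identity for a single channel (purely illustrative, independent of the project code):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)  # pre-BN outputs of one conv channel
gamma, beta, eps = 1.7, 0.3, 1e-3
mean, var = x.mean(), x.var()

# Inference-mode batch norm applied to the conv output.
bn = gamma * (x - mean) / np.sqrt(var + eps) + beta

# Folded form: scale A goes into the kernel, bias B absorbs the rest.
A = gamma / np.sqrt(var + eps)
B = beta - gamma * mean / np.sqrt(var + eps)
folded = A * x + B

assert np.allclose(bn, folded)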