Python keras.layers.InputLayer() Examples

The following are 10 code examples of keras.layers.InputLayer(). Each example is taken from an open-source project; the source project, author, file, and license are listed above it. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
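
Before the project examples, here is a minimal standalone sketch (not taken from any project below) of the two common ways to attach an InputLayer to a Sequential model: by declaring an input shape, or by binding an existing tensor such as one coming from an input pipeline.

from keras.models import Sequential
from keras.layers import InputLayer, Flatten, Dense

# Declare the expected input shape; InputLayer adds no trainable weights.
model = Sequential()
model.add(InputLayer(input_shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

# Alternatively, bind an existing tensor (used by several examples below):
# model.add(InputLayer(input_tensor=some_input_tensor))
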
Example #1
Source Project: faceai   Author: vipstone   File: colorize.py    License: MIT License
def build_model():
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # model.compile(optimizer='rmsprop', loss='mse')
    model.compile(optimizer='adam', loss='mse')
    return model


# Training data
Example #2
Source Project: srcnn   Author: qobilidop   File: models.py    License: MIT License
def fsrcnn(x, d=56, s=12, m=4, scale=3):
    """Build an FSRCNN model.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f = [5, 1] + [3] * m + [1]
    n = [d, s] + [s] * m + [d]
    for ni, fi in zip(n, f):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(c, 9, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    return model 
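
A minimal usage sketch for fsrcnn (the dummy input below is an assumption, not part of the srcnn project; it assumes the Keras imports from models.py, i.e. Sequential, InputLayer, Conv2D and Conv2DTranspose, are in scope):

import numpy as np

# Hypothetical batch of 8 low-resolution grayscale patches, 32x32 pixels each.
x = np.zeros((8, 32, 32, 1), dtype='float32')
model = fsrcnn(x, scale=3)
model.summary()  # the final Conv2DTranspose upsamples 32x32 to 96x96
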
Example #3
Source Project: srcnn   Author: qobilidop   File: models.py    License: MIT License
def nsfsrcnn(x, d=56, s=12, m=4, scale=3, pos=1):
    """Build an FSRCNN model, but change deconv position.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f1 = [5, 1] + [3] * pos
    n1 = [d, s] + [s] * pos
    f2 = [3] * (m - pos - 1) + [1]
    n2 = [s] * (m - pos - 1) + [d]
    f3 = 9
    n3 = c
    for ni, fi in zip(n1, f1):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(s, 3, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    for ni, fi in zip(n2, f2):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2D(n3, f3, padding='same',
                     kernel_initializer='he_normal'))
    return model 
Example #4
Source Project: srcnn   Author: qobilidop   File: models.py    License: MIT License
def espcn(x, f=[5, 3, 3], n=[64, 32], scale=3):
    """Build an ESPCN model.

    See https://arxiv.org/abs/1609.05158
    """
    assert len(f) == len(n) + 1
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[1:]))
    c = x.shape[-1]
    for ni, fi in zip(n, f):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='tanh'))
    model.add(Conv2D(c * scale ** 2, f[-1], padding='same',
                     kernel_initializer='he_normal'))
    model.add(Conv2DSubPixel(scale))
    return model 
Example #5
Source Project: keras_experiments   Author: avolkov1   File: cifar_common.py    License: The Unlicense
def make_model_small(train_input, num_classes, weights_file=None):
    '''Return Cifar10 DL model with a small number of layers.'''
    model = Sequential()

    # model.add(KL.InputLayer(input_shape=inshape[1:]))
    if isinstance(train_input, tf.Tensor):
        model.add(KL.InputLayer(input_tensor=train_input))
    else:
        model.add(KL.InputLayer(input_shape=train_input))

    # if standardize:
    #     model.add(KL.Lambda(stand_img))

    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Flatten())
    # model.add(Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))

    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)

    return model 
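
A hypothetical call sketch (the shapes and class count below are assumptions), showing both branches of the isinstance check; it assumes the tensorflow (1.x) and keras imports used by cifar_common.py are in scope:

# Branch 1: a plain shape tuple goes through InputLayer(input_shape=...).
model_a = make_model_small((32, 32, 3), num_classes=10)

# Branch 2: a tf.Tensor (e.g. from an input pipeline) goes through
# InputLayer(input_tensor=...), so data is fed directly from the graph.
images = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
model_b = make_model_small(images, num_classes=10)
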
Example #6
Source Project: voxelmorph   Author: voxelmorph   File: layers.py    License: GNU General Public License v3.0
    def compute_output_shape(self, input_shape):
        return input_shape


# class LocalParam(InputLayer):

#     def __init__(self, shape, mult=1, my_initializer='RandomNormal', **kwargs):
#         super(LocalParam, self).__init__(input_shape=shape, **kwargs)       
       
#         # Create a trainable weight variable for this layer.
#         self.kernel = self.add_weight(name='kernel', 
#                                       shape=tuple(shape),
#                                       initializer=my_initializer,
#                                       trainable=True)
        
#         outputs = self._inbound_nodes[0].output_tensors
#         z = Input(tensor=K.expand_dims(self.kernel, 0)*mult)
#         if len(outputs) == 1:
#             self._inbound_nodes[0].output_tensors[0] = z
#         else:
#             self._inbound_nodes[0].output_tensors = z
      
#     def get_output(self):  # call() would force inputs
#             outputs = self._inbound_nodes[0].output_tensors
#             if len(outputs) == 1:
#                 return outputs[0]
#             else:
#                 return outputs 
Example #7
Source Project: srcnn   Author: qobilidop   File: models.py    License: MIT License
def bicubic(x, scale=3):
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    model.add(ImageRescale(scale, method=tf.image.ResizeMethod.BICUBIC))
    return model 
Example #8
Source Project: keras_experiments   Author: avolkov1   File: cifar10_cnn_distrib_v2_slurm.py    License: The Unlicense
def make_model(inshape, num_classes, weights_file=None):
    model = Sequential()
    model.add(KL.InputLayer(input_shape=inshape[1:]))
    # model.add(KL.Conv2D(32, (3, 3), padding='same', input_shape=inshape[1:]))
    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(32, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Conv2D(64, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(64, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Flatten())
    model.add(KL.Dense(512))
    model.add(KL.Activation('relu'))
    model.add(KL.Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))

    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)

    return model 
Example #9
Source Project: keras_experiments   Author: avolkov1   File: mnist_tfrecord_mgpu.py    License: The Unlicense
def make_model(x_train_input, nclasses):
    '''Model definition using the Sequential (non-functional) API.'''
    model = Sequential()
    model.add(KL.InputLayer(input_tensor=x_train_input))
    ll = cnn_layers_list(nclasses)
    for il in ll:
        model.add(il)

    return model 
Example #10
Source Project: keras_experiments   Author: avolkov1   File: cifar_common.py    License: The Unlicense
def make_model_full(train_input, num_classes, weights_file=None):
    '''Return Cifar10 DL model with many layers.

    :param train_input: Either a tf.Tensor input placeholder/pipeline, or a
        tuple input shape.
    '''
    model = Sequential()

    # model.add(KL.InputLayer(input_shape=inshape[1:]))
    if isinstance(train_input, tf.Tensor):
        model.add(KL.InputLayer(input_tensor=train_input))
    else:
        model.add(KL.InputLayer(input_shape=train_input))

    # if standardize:
    #     model.add(KL.Lambda(stand_img))

    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(32, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Conv2D(64, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(64, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Flatten())
    model.add(KL.Dense(512))
    model.add(KL.Activation('relu'))
    model.add(KL.Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))

    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)

    return model