Python tensorflow.keras.layers() Examples
The following are 30 code examples of tensorflow.keras.layers().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
tensorflow.keras, or try the search function.
Example #1
Source File: train_variational_autoencoder_tensorflow.py From variational-autoencoder with MIT License | 6 votes |
def inference_network(x, latent_dim, hidden_size):
    """Construct an inference network parametrizing a Gaussian.

    Args:
        x: A batch of MNIST digits.
        latent_dim: The latent dimensionality.
        hidden_size: The size of the neural net hidden layers.

    Returns:
        mu: Mean parameters for the variational family Normal.
        sigma: Standard deviation parameters for the variational family Normal.
    """
    encoder = tfk.Sequential([
        tfkl.Flatten(),
        tfkl.Dense(hidden_size, activation=tf.nn.relu),
        tfkl.Dense(hidden_size, activation=tf.nn.relu),
        # Two heads packed into one output: mean and (pre-softplus) stddev.
        tfkl.Dense(latent_dim * 2, activation=None),
    ])
    gaussian_params = encoder(x)
    # First half of the outputs: the unconstrained mean.
    mu = gaussian_params[:, :latent_dim]
    # Second half: softplus keeps the standard deviation strictly positive.
    sigma = tf.nn.softplus(gaussian_params[:, latent_dim:])
    return mu, sigma
Example #2
Source File: utils.py From snn_toolbox with MIT License | 6 votes |
def get_outbound_activation(layer):
    """Iterate over up to 2 outbound layers to find an activation layer.

    If there is no activation layer, take the activation of the current
    layer.

    Parameters
    ----------
    layer: Union[keras.layers.Conv2D, keras.layers.Dense]
        Layer

    Returns
    -------
    activation: str
        Name of outbound activation type.
    """
    activation = layer.activation.__name__
    outbound = layer
    for _ in range(2):
        successors = get_outbound_layers(outbound)
        # Only follow an unambiguous (single-successor) path; stop at
        # branch points. The original passed the whole *list* back into
        # get_outbound_layers on the second iteration, which raised
        # AttributeError.
        if len(successors) != 1:
            break
        outbound = successors[0]
        if get_type(outbound) == 'Activation':
            activation = outbound.activation.__name__
    return activation
Example #3
Source File: utils.py From snn_toolbox with MIT License | 6 votes |
def get_outbound_layers(layer):
    """Return outbound layers.

    Parameters
    ----------
    layer: Keras.layers
        A Keras layer.

    Returns
    -------
    : list[Keras.layers]
        List of outbound layers.
    """
    # Newer Keras stores nodes on the private attribute; older versions
    # expose ``outbound_nodes`` directly.
    if hasattr(layer, '_outbound_nodes'):
        # noinspection PyProtectedMember
        nodes = layer._outbound_nodes
    else:
        nodes = layer.outbound_nodes
    return [node.outbound_layer for node in nodes]
Example #4
Source File: utils.py From snn_toolbox with MIT License | 6 votes |
def get_inbound_layers_without_params(layer):
    """Return inbound layers that carry no weights.

    Parameters
    ----------
    layer: Keras.layers
        A Keras layer.

    Returns
    -------
    : list[Keras.layers]
        List of inbound layers without parameters.
    """
    # Use a distinct loop variable: the original comprehension re-used
    # the name ``layer``, shadowing the parameter.
    return [inb for inb in get_inbound_layers(layer) if not has_weights(inb)]
Example #5
Source File: imagenet_densenet.py From DeepPoseKit with Apache License 2.0 | 6 votes |
def transition_block(x, reduction, name, pool=True):
    """A transition block.

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.
        pool: bool, whether to finish with average pooling.

    # Returns
        output tensor for the block.
    """
    channel_axis = 3 if backend.image_data_format() == "channels_last" else 1
    out = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + "_bn"
    )(x)
    out = layers.Activation("relu", name=name + "_relu")(out)
    # Compress the channel count by ``reduction``.
    n_filters = int(backend.int_shape(out)[channel_axis] * reduction)
    out = layers.Conv2D(n_filters, 1, use_bias=False, name=name + "_conv")(out)
    if pool:
        out = layers.AveragePooling2D(2, strides=2, name=name + "_pool")(out)
    return out
Example #6
Source File: utils.py From snn_toolbox with MIT License | 6 votes |
def has_weights(layer):
    """Return ``True`` if layer has weights.

    Parameters
    ----------
    layer : keras.layers.Layer
        Keras layer

    Returns
    -------
    : bool
        ``True`` if layer has weights.
    """
    # The original returned ``len(layer.weights)`` (an int); return an
    # actual bool as the docstring promises. Truthiness is unchanged.
    return len(layer.weights) > 0
Example #7
Source File: utils.py From snn_toolbox with MIT License | 6 votes |
def format_layer_idx(self, idx):
    """Pad the layer index with the appropriate amount of zeros.

    The number of zeros used for padding is determined by the maximum
    index (i.e. the number of layers in the network).

    Parameters
    ----------
    idx: int
        Layer index.

    Returns
    -------
    num_str: str
        Zero-padded layer index.
    """
    num_layers = len(self.input_model.layers)
    width = len(str(num_layers))
    return str(idx).zfill(width)
Example #8
Source File: __init__.py From garbage_classify with Apache License 2.0 | 5 votes |
def inject_keras_modules(func):
    """Decorator injecting the ``keras`` submodules into ``func``'s kwargs."""
    import keras

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Overwrite any caller-supplied submodules with the keras ones.
        kwargs.update(backend=keras.backend, layers=keras.layers,
                      models=keras.models, utils=keras.utils)
        return func(*args, **kwargs)

    return wrapper
Example #9
Source File: __init__.py From garbage_classify with Apache License 2.0 | 5 votes |
def get_submodules_from_kwargs(kwargs):
    """Extract keras submodules from ``kwargs``, falling back to defaults.

    Parameters
    ----------
    kwargs: dict
        May contain 'backend', 'layers', 'models', 'utils'; any other key
        raises ``TypeError``.

    Returns
    -------
    : tuple
        (backend, layers, models, utils)
    """
    backend = kwargs.get('backend', _KERAS_BACKEND)
    layers = kwargs.get('layers', _KERAS_LAYERS)
    models = kwargs.get('models', _KERAS_MODELS)
    utils = kwargs.get('utils', _KERAS_UTILS)
    for key in kwargs:
        if key not in ('backend', 'layers', 'models', 'utils'):
            # The original passed ``key`` as a second argument to
            # TypeError, which does not interpolate '%s'; format it in.
            raise TypeError('Invalid keyword argument: %s' % key)
    return backend, layers, models, utils
Example #10
Source File: imagenet_densenet.py From DeepPoseKit with Apache License 2.0 | 5 votes |
def conv_block(x, growth_rate, name, dilation=1):
    """A building block for a dense block.

    # Arguments
        x: input tensor.
        growth_rate: float, growth rate at dense layers.
        name: string, block label.
        dilation: int, dilation rate of the 3x3 convolution.

    # Returns
        Output tensor for the block.
    """
    channel_axis = 3 if backend.image_data_format() == "channels_last" else 1
    # Bottleneck: BN -> ReLU -> 1x1 conv to 4*growth_rate channels.
    y = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + "_0_bn"
    )(x)
    y = layers.Activation("relu", name=name + "_0_relu")(y)
    y = layers.Conv2D(4 * growth_rate, 1, use_bias=False,
                      name=name + "_1_conv")(y)
    # 3x3 conv producing growth_rate new feature maps.
    y = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + "_1_bn"
    )(y)
    y = layers.Activation("relu", name=name + "_1_relu")(y)
    y = layers.Conv2D(
        growth_rate,
        3,
        padding="same",
        use_bias=False,
        dilation_rate=dilation,
        name=name + "_2_conv",
    )(y)
    # Dense connectivity: concatenate the input with the new features.
    return layers.Concatenate(axis=channel_axis, name=name + "_concat")([x, y])
Example #11
Source File: __init__.py From garbage_classify with Apache License 2.0 | 5 votes |
def inject_tfkeras_modules(func):
    """Decorator injecting ``tensorflow.keras`` submodules into kwargs."""
    import tensorflow.keras as tfkeras

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Overwrite any caller-supplied submodules with the tf.keras ones.
        kwargs.update(backend=tfkeras.backend, layers=tfkeras.layers,
                      models=tfkeras.models, utils=tfkeras.utils)
        return func(*args, **kwargs)

    return wrapper
Example #12
Source File: __init__.py From segmentation_models with MIT License | 5 votes |
def inject_global_submodules(func):
    """Decorator injecting the module-level keras submodules into kwargs."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Read the globals at call time, not decoration time.
        kwargs.update(backend=_KERAS_BACKEND, layers=_KERAS_LAYERS,
                      models=_KERAS_MODELS, utils=_KERAS_UTILS)
        return func(*args, **kwargs)

    return wrapper
Example #13
Source File: __init__.py From segmentation_models with MIT License | 5 votes |
def filter_kwargs(func):
    """Decorator dropping every keyword argument except the keras
    submodule ones (``backend``, ``layers``, ``models``, ``utils``)."""
    allowed = ('backend', 'layers', 'models', 'utils')

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        kept = {key: value for key, value in kwargs.items() if key in allowed}
        return func(*args, **kept)

    return wrapper
Example #14
Source File: __init__.py From segmentation_models with MIT License | 5 votes |
def get_preprocessing(name):
    """Return the preprocessing function for backbone ``name``.

    The returned callable accepts (and sanitizes) the keras submodule
    keyword arguments expected by keras-applications.
    """
    base = Backbones.get_preprocessing(name)
    # Inject backend, models, layers, utils submodules into kwargs, then
    # drop every other kwarg: keras-applications preprocessing raises if
    # anything except `backend`, `layers`, `models`, `utils` is passed.
    return filter_kwargs(inject_global_submodules(base))
Example #15
Source File: __init__.py From EfficientDet with Apache License 2.0 | 5 votes |
def inject_keras_modules(func):
    """Decorator injecting the ``keras`` submodules into ``func``'s kwargs."""
    import keras

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        for attr in ('backend', 'layers', 'models', 'utils'):
            # Overwrite any caller-supplied submodule with the keras one.
            kwargs[attr] = getattr(keras, attr)
        return func(*args, **kwargs)

    return wrapper
Example #16
Source File: model.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def get_trainable_layers(self):
    """Returns a list of layers that have weights."""
    # Unwrap wrapper layers first to reach the inner trainable layer,
    # then keep only those that actually carry weights.
    candidates = (self.find_trainable_layer(l)
                  for l in self.keras_model.layers)
    return [inner for inner in candidates if inner.get_weights()]
Example #17
Source File: model.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1): """Sets model layers as trainable if their names match the given regular expression. """ # Print message on the first call (but not on recursive calls) if verbose > 0 and keras_model is None: log("Selecting layers to train") keras_model = keras_model or self.keras_model # In multi-GPU training, we wrap the model. Get layers # of the inner model because they have the weights. layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\ else keras_model.layers for layer in layers: # Is the layer a model? if layer.__class__.__name__ == 'Model': print("In model: ", layer.name) self.set_trainable( layer_regex, keras_model=layer, indent=indent + 4) continue if not layer.weights: continue # Is it trainable? trainable = bool(re.fullmatch(layer_regex, layer.name)) # Update layer. If layer is a container, update inner layer. if layer.__class__.__name__ == 'TimeDistributed': layer.layer.trainable = trainable else: layer.trainable = trainable # Print trainable layer names if trainable and verbose > 0: log("{}{:20} ({})".format(" " * indent, layer.name, layer.__class__.__name__))
Example #18
Source File: model.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.

    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns the list [C1, C2, C3, C4, C5] of stage outputs (C5 is None
    when ``stage5`` is False).
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    # resnet50 and resnet101 differ only in the number of stage-4 blocks.
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # Blocks are labelled 'b', 'c', ... (chr(98) == 'b').
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
Example #19
Source File: model.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers

    Note that from stage 3, the first conv layer at main path is with
    subsample=(2,2) and the shortcut should have subsample=(2,2) as well.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Main path: 1x1 (strided) -> kxk -> 1x1 bottleneck.
    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    # Shortcut path: 1x1 conv matches channels (and stride) of main path.
    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    # Residual addition followed by the final activation.
    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #20
Source File: model.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Main path: 1x1 -> kxk -> 1x1 bottleneck.
    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    # Shortcut is the raw input: requires matching channel count.
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #21
Source File: model.py From ocrd_anybaseocr with Apache License 2.0 | 5 votes |
def call(self, inputs, training=None):
    """
    Note about training values:
        None: Train BN layers. This is the normal mode
        False: Freeze BN layers. Good when batch size is small
        True: (don't use). Set layer in training mode even when making inferences
    """
    # NOTE(review): `super(self.__class__, self)` resolves against the
    # *runtime* class, which recurses infinitely if this class is ever
    # subclassed; naming the defining class explicitly would be safer —
    # confirm before changing, as the class header is not visible here.
    return super(self.__class__, self).call(inputs, training=training)
Example #22
Source File: networks.py From brainstorm with MIT License | 5 votes |
def cvpr2018_net(vol_size, enc_nf, dec_nf, indexing='ij', name="voxelmorph"):
    """
    From https://github.com/voxelmorph/voxelmorph.

    unet architecture for voxelmorph models presented in the CVPR 2018 paper.
    You may need to modify this code (e.g., number of layers) to suit your
    project needs.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6 (like
           voxelmorph-1) or 1x7 (voxelmorph-2)
    :param indexing: 'ij' or 'xy', passed to the spatial transformer.
    :param name: name of the returned keras model.
    :return: the keras model
    """
    import tensorflow.keras.layers as KL

    ndims = len(vol_size)
    assert ndims==3, "ndims should be 3. found: %d" % ndims

    # Two single-channel volumes: moving (src) and fixed (tgt) images.
    src = Input(vol_size + (1,), name='input_src')
    tgt = Input(vol_size + (1,), name='input_tgt')
    input_stack = Concatenate(name='concat_inputs')([src, tgt])

    # get the core model
    x = unet3D(input_stack, img_shape=vol_size, out_im_chans=ndims,
               nf_enc=enc_nf, nf_dec=dec_nf)

    # transform the results into a flow field. Small-stddev init keeps the
    # initial deformation near zero.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow = Conv(ndims, kernel_size=3, padding='same', name='flow',
                kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)

    # warp the source with the flow
    y = SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])

    # prepare model
    model = Model(inputs=[src, tgt], outputs=[y, flow], name=name)
    return model


##############################################################################
# Appearance transform model
##############################################################################
Example #23
Source File: __init__.py From EfficientDet with Apache License 2.0 | 5 votes |
def inject_tfkeras_modules(func): import tensorflow.keras as tfkeras @functools.wraps(func) def wrapper(*args, **kwargs): kwargs['backend'] = tfkeras.backend kwargs['layers'] = tfkeras.layers kwargs['models'] = tfkeras.models kwargs['utils'] = tfkeras.utils return func(*args, **kwargs) return wrapper
Example #24
Source File: utils.py From snn_toolbox with MIT License | 5 votes |
def get_fanin(layer):
    """Return fan-in of a neuron in ``layer``.

    Parameters
    ----------
    layer: Subclass[keras.layers.Layer]
        Layer.

    Returns
    -------
    fanin: int
        Fan-in.
    """
    layer_type = get_type(layer)
    if 'Conv' in layer_type:
        # Fan-in of a conv neuron: kernel volume times input channels.
        channel_axis = 1 if IS_CHANNELS_FIRST else -1
        return np.prod(layer.kernel_size) * layer.input_shape[channel_axis]
    if 'Dense' in layer_type:
        return layer.input_shape[1]
    # Pooling and all other layer types contribute no weighted fan-in.
    return 0
Example #25
Source File: __init__.py From platypush with MIT License | 5 votes |
def _layer_from_dict(layer_type: str, *args, **kwargs) -> Layer:
    """Instantiate a keras layer from its class name.

    ``layer_type`` must name a class in ``tensorflow.keras.layers``;
    remaining arguments are forwarded to its constructor.
    """
    from tensorflow.keras import layers
    layer_cls = getattr(layers, layer_type)
    assert issubclass(layer_cls, Layer)
    return layer_cls(*args, **kwargs)
Example #26
Source File: convert_to_tf_keras.py From keras-contrib with MIT License | 5 votes |
def test_replace_imports():
    """Check that keras imports are rewritten to tf.keras and back.

    ``replace_imports_in_text(code, False)`` should convert plain keras
    imports to their tensorflow.keras equivalents (leaving keras_contrib
    untouched), and the reverse flag should round-trip back to the input.
    """
    python_code = """
    import keras
    from keras import backend as K
    import os
    import keras_contrib
    import keras_contrib.layers as lay
    import keras.layers
    from keras.layers import Dense

    if K.backend() == 'tensorflow':
        import tensorflow as tf
        function = tf.max
    """

    expected_code = """
    from tensorflow import keras
    from tensorflow.keras import backend as K
    import os
    import keras_contrib
    import keras_contrib.layers as lay
    import tensorflow.keras.layers
    from tensorflow.keras.layers import Dense

    if K.backend() == 'tensorflow':
        import tensorflow as tf
        function = tf.max
    """
    # Forward rewrite: keras -> tensorflow.keras.
    code_with_replacement = replace_imports_in_text(python_code, False)
    assert expected_code == code_with_replacement
    # Reverse rewrite must round-trip to the original source.
    assert python_code == replace_imports_in_text(code_with_replacement, True)
Example #27
Source File: utils.py From snn_toolbox with MIT License | 5 votes |
def get_layer_iterable(self):
    """Get an iterable over the layers of the network.

    Abstract hook: concrete parsers override this to yield the layers of
    ``self.input_model`` in topological order.

    Returns
    -------

    layers: list
    """

    pass
Example #28
Source File: utils.py From snn_toolbox with MIT License | 5 votes |
def get_inbound_layers_with_parameters(self, layer):
    """Iterate until inbound layers are found that have parameters.

    Parameters
    ----------

    layer: Layer

    Returns
    -------

    : list
        List of inbound layers.
    """
    inbound = self.get_inbound_layers(layer)
    # Walk back through single-predecessor chains until a parameterized
    # layer is found or the graph branches.
    while len(inbound) == 1:
        candidate = inbound[0]
        if self.has_weights(candidate):
            return [candidate]
        inbound = self.get_inbound_layers(candidate)
    # Fan-in != 1: resolve each inbound branch independently.
    result = []
    for inb in inbound:
        if self.has_weights(inb):
            result.append(inb)
        else:
            result += self.get_inbound_layers_with_parameters(inb)
    return result
Example #29
Source File: utils.py From snn_toolbox with MIT License | 5 votes |
def get_inbound_names(self, layer, name_map):
    """Get names of inbound layers.

    Parameters
    ----------

    layer: Layer

    name_map: dict
        Maps the name of a layer to the `id` of the layer object.

    Returns
    -------

    : list
        The names of inbound layers.
    """

    inbound = self.get_inbound_layers(layer)
    for ib in range(len(inbound)):
        # Skip over layer types in ``layers_to_skip``; bounded by the
        # number of skippable types so the walk always terminates.
        for _ in range(len(self.layers_to_skip)):
            if self.get_type(inbound[ib]) in self.layers_to_skip:
                inbound[ib] = self.get_inbound_layers(inbound[ib])[0]
            else:
                break
    # No layers parsed yet, or an InputLayer feeds this one: the parsed
    # network's input is the predecessor.
    if len(self._layer_list) == 0 or \
            any([self.get_type(inb) == 'InputLayer' for inb in inbound]):
        return ['input']
    else:
        # Resolve each inbound layer object to its parsed-layer name.
        inb_idxs = [name_map[str(id(inb))] for inb in inbound]
        return [self._layer_list[i]['name'] for i in inb_idxs]
Example #30
Source File: utils.py From snn_toolbox with MIT License | 5 votes |
def get_inbound_layers(self, layer):
    """Get inbound layers of ``layer``.

    Abstract hook: concrete parsers override this to return the direct
    predecessors of ``layer`` in the model graph.

    Returns
    -------

    inbound: Sequence
    """

    pass