Python keras.layers.UpSampling2D() Examples
The following are 30 code examples of keras.layers.UpSampling2D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
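
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic behaviour of the layer, assuming the Keras 2.x functional API: UpSampling2D repeats the rows and columns of its input by the given factors, so a (8, 8, 3) feature map becomes (16, 16, 3).

import numpy as np
from keras.layers import Input, UpSampling2D
from keras.models import Model

# One-layer model that doubles spatial resolution by repeating rows and columns.
inp = Input(shape=(8, 8, 3))
out = UpSampling2D(size=(2, 2))(inp)   # default interpolation is nearest neighbour
model = Model(inp, out)

x = np.arange(8 * 8 * 3, dtype='float32').reshape(1, 8, 8, 3)
y = model.predict(x)
print(y.shape)                         # (1, 16, 16, 3)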

Example #1
Source Project: ocsvm-anomaly-detection Author: hiram64 File: model.py License: MIT License | 7 votes |
def build_cae_model(height=32, width=32, channel=3):
    """
    build convolutional autoencoder model
    """
    input_img = Input(shape=(height, width, channel))

    # encoder
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(net)
    encoded = MaxPooling2D((2, 2), padding='same', name='enc')(net)

    # decoder
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(encoded)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    decoded = Conv2D(channel, (3, 3), activation='sigmoid', padding='same')(net)

    return Model(input_img, decoded)
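
As a hedged usage sketch (not part of the ocsvm-anomaly-detection project), the returned autoencoder might be compiled with a pixel-wise loss and trained to reconstruct its own input; x_train below is a random stand-in for real images scaled to [0, 1].

import numpy as np

model = build_cae_model(height=32, width=32, channel=3)
model.compile(optimizer='adam', loss='mse')

# Stand-in data: replace with real images of shape (num_samples, 32, 32, 3) in [0, 1].
x_train = np.random.rand(256, 32, 32, 3).astype('float32')
model.fit(x_train, x_train, epochs=1, batch_size=64)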
Example #2
Source Project: faceai Author: vipstone File: colorize.py License: MIT License | 7 votes |
def build_model():
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # model.compile(optimizer='rmsprop', loss='mse')
    model.compile(optimizer='adam', loss='mse')
    return model

# training data
Example #3
Source Project: Keras-BiGAN Author: manicman1999 File: bigan.py License: MIT License | 6 votes |
def g_block(inp, fil, u=True):

    if u:
        out = UpSampling2D(interpolation='bilinear')(inp)
    else:
        out = Activation('linear')(inp)

    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    return out
Example #4
Source Project: n2n-watermark-remove Author: zxq2233 File: model.py License: MIT License | 6 votes |
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
                   dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n
        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)
        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model
Example #5
Source Project: ImageAI Author: OlafenwaMoses File: models.py License: MIT License | 6 votes |
def yolo_main(input, num_anchors, num_classes):

    darknet_network = Model(input, darknet(input))

    network, network_1 = last_layers(darknet_network.output, 512, num_anchors * (num_classes + 5), layer_name="last1")

    network = NetworkConv2D_BN_Leaky(input=network, channels=256, kernel_size=(1, 1))
    network = UpSampling2D(2)(network)
    network = Concatenate()([network, darknet_network.layers[152].output])

    network, network_2 = last_layers(network, 256, num_anchors * (num_classes + 5), layer_name="last2")

    network = NetworkConv2D_BN_Leaky(input=network, channels=128, kernel_size=(1, 1))
    network = UpSampling2D(2)(network)
    network = Concatenate()([network, darknet_network.layers[92].output])

    network, network_3 = last_layers(network, 128, num_anchors * (num_classes + 5), layer_name="last3")

    return Model(input, [network_1, network_2, network_3])
Example #6
Source Project: AnomalyDetectionUsingAutoencoder Author: otenim File: models.py License: MIT License | 6 votes |
def convolutional_autoencoder():
    input_shape = (28, 28, 1)
    n_channels = input_shape[-1]
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=input_shape))
    model.add(MaxPool2D(padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(MaxPool2D(padding='same'))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D())
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D())
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(n_channels, (3, 3), activation='sigmoid', padding='same'))
    return model
Example #7
Source Project: coremltools Author: apple File: test_keras2_numeric.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_tiny_conv_upsample_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )
    model.add(UpSampling2D(size=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model)
Example #8
Source Project: coremltools Author: apple File: test_keras2_numeric.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_upsample_layer_params(self):
    options = dict(size=[(2, 2), (3, 3), (4, 4), (5, 5)])

    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    X = np.random.rand(1, *input_shape)

    # Define a function that tests a model
    def build_model(x):
        kwargs = dict(zip(options.keys(), x))
        model = Sequential()
        model.add(Conv2D(filters=5, kernel_size=(7, 7), input_shape=input_shape))
        model.add(UpSampling2D(**kwargs))
        return x, model

    # Iterate through all combinations
    product = itertools.product(*options.values())
    args = [build_model(p) for p in product]

    # Test the cases
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)
Example #9
Source Project: DiscriminativeActiveLearning Author: dsgissin File: models.py License: MIT License | 6 votes |
def get_autoencoder_model(input_shape, labels=10):
    """
    An autoencoder for MNIST to be used in the DAL implementation.
    """
    image = Input(shape=input_shape)

    encoder = Conv2D(32, (3, 3), activation='relu', padding='same')(image)
    encoder = MaxPooling2D((2, 2), padding='same')(encoder)
    encoder = Conv2D(8, (3, 3), activation='relu', padding='same')(encoder)
    encoder = Conv2D(4, (3, 3), activation='relu', padding='same')(encoder)
    encoder = MaxPooling2D((2, 2), padding='same')(encoder)

    decoder = UpSampling2D((2, 2), name='embedding')(encoder)
    decoder = Conv2D(4, (3, 3), activation='relu', padding='same')(decoder)
    decoder = Conv2D(8, (3, 3), activation='relu', padding='same')(decoder)
    decoder = UpSampling2D((2, 2))(decoder)
    decoder = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder)
    decoder = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoder)

    autoencoder = Model(image, decoder)
    return autoencoder
Example #10
Source Project: u-net Author: yihui-he File: train_res.py License: MIT License | 6 votes |
def _up_block(block, mrge, nb_filters):
    up = merge([Convolution2D(2 * nb_filters, 2, 2, border_mode='same')(UpSampling2D(size=(2, 2))(block)), mrge],
               mode='concat', concat_axis=1)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    return conv

# http://arxiv.org/pdf/1512.03385v1.pdf
# 50 Layer resnet
Example #11
Source Project: SpaceNet_Off_Nadir_Solutions Author: SpaceNetChallenge File: blocks.py License: Apache License 2.0 | 6 votes |
def Upsample2D_block(filters, stage, kernel_size=(3, 3), upsample_rate=(2, 2),
                     use_batchnorm=False, skip=None):

    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer
Example #12
Source Project: SpaceNet_Off_Nadir_Solutions Author: SpaceNetChallenge File: blocks.py License: Apache License 2.0 | 6 votes |
def Conv2DUpsample(filters, upsample_rate, kernel_size=(3, 3), up_name='up', conv_name='conv', **kwargs):

    def layer(input_tensor):
        x = UpSampling2D(upsample_rate, name=up_name)(input_tensor)
        x = Conv2D(filters, kernel_size, padding='same', name=conv_name, **kwargs)(x)
        return x
    return layer
Example #13
Source Project: dfc2019 Author: pubgeo File: blocks.py License: MIT License | 6 votes |
def Upsample2D_block(filters, stage, kernel_size=(3, 3), upsample_rate=(2, 2),
                     use_batchnorm=False, skip=None):

    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer
Example #14
Source Project: dfc2019 Author: pubgeo File: blocks.py License: MIT License | 6 votes |
def Upsample2D_block(filters, stage, kernel_size=(3, 3), upsample_rate=(2, 2),
                     use_batchnorm=False, skip=None):

    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer
Example #15
Source Project: dfc2019 Author: pubgeo File: blocks.py License: MIT License | 6 votes |
def Conv2DUpsample(filters, upsample_rate, kernel_size=(3, 3), up_name='up', conv_name='conv', **kwargs):

    def layer(input_tensor):
        x = UpSampling2D(upsample_rate, name=up_name)(input_tensor)
        x = Conv2D(filters, kernel_size, padding='same', name=conv_name, **kwargs)(x)
        return x
    return layer
Example #16
Source Project: dsb2018_topcoders Author: selimsef File: unets.py License: MIT License | 6 votes |
def inception_resnet_v2_fpn(input_shape, channels=1, activation="sigmoid"):
    inceresv2 = InceptionResNetV2Same(input_shape=input_shape, include_top=False)
    conv1, conv2, conv3, conv4, conv5 = inceresv2.output
    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate(
        [
            prediction_fpn_block(P5, "P5", (8, 8)),
            prediction_fpn_block(P4, "P4", (4, 4)),
            prediction_fpn_block(P3, "P3", (2, 2)),
            prediction_fpn_block(P2, "P2"),
        ]
    )
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(inceresv2.input, x)
    return model
Example #17
Source Project: cyclegan_keras Author: alecGraves File: models.py License: The Unlicense | 6 votes |
def mnist_generator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)
    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale * 2, num_id=3)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(int(1024 * scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(x0, x)
Example #18
Source Project: Unified-Gesture-and-Fingertip-Detection Author: MahmudulAlam File: network.py License: MIT License | 6 votes |
def model():
    model = VGG16(include_top=False, input_shape=(128, 128, 3))
    x = model.output
    y = x

    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    probability = Dense(5, activation='sigmoid', name='probabilistic_output')(x)

    y = UpSampling2D((3, 3))(y)
    y = Activation('relu')(y)
    y = Conv2D(1, (3, 3), activation='linear')(y)
    position = Reshape(target_shape=(10, 10), name='positional_output')(y)

    model = Model(input=model.input, outputs=[probability, position])
    return model
Example #19
Source Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation Author: JACKYLUO1991 File: lednet.py License: Apache License 2.0 | 5 votes |
def apn_module(self, x):

    def right(x):
        x = layers.AveragePooling2D()(x)
        x = layers.Conv2D(self.classes, kernel_size=1, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.UpSampling2D(interpolation='bilinear')(x)
        return x

    def conv(x, filters, kernel_size, stride):
        x = layers.Conv2D(filters, kernel_size=kernel_size,
                          strides=(stride, stride), padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        return x

    x_7 = conv(x, int(x.shape[-1]), 7, stride=2)
    x_5 = conv(x_7, int(x.shape[-1]), 5, stride=2)
    x_3 = conv(x_5, int(x.shape[-1]), 3, stride=2)

    x_3_1 = conv(x_3, self.classes, 3, stride=1)
    x_3_1_up = layers.UpSampling2D(interpolation='bilinear')(x_3_1)
    x_5_1 = conv(x_5, self.classes, 5, stride=1)
    x_3_5 = layers.add([x_5_1, x_3_1_up])
    x_3_5_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5)
    x_7_1 = conv(x_7, self.classes, 3, stride=1)
    x_3_5_7 = layers.add([x_7_1, x_3_5_up])
    x_3_5_7_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5_7)

    x_middle = conv(x, self.classes, 1, stride=1)
    x_middle = layers.multiply([x_3_5_7_up, x_middle])

    x_right = right(x)
    x_middle = layers.add([x_middle, x_right])

    return x_middle
Example #20
Source Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation Author: JACKYLUO1991 File: lednet.py License: Apache License 2.0 | 5 votes |
def decoder(self, x):
    x = self.apn_module(x)
    x = layers.UpSampling2D(size=8, interpolation='bilinear')(x)
    x = layers.Conv2D(self.classes, kernel_size=3, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('softmax')(x)
    return x
Example #21
Source Project: ai-platform Author: produvia File: yolov3_weights_to_keras.py License: MIT License | 5 votes |
def upSampling(x, skip_36, skip_61, layer_idx, num_classes=80):
    out_filters = 3 * (num_classes + 5)
    yolo_83 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx},
                              {'filter': out_filters, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': layer_idx+1}],
                          skip=False)
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+4}],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+7},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+8},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+9},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+10},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+11}],
                    skip=False)
    # Layer 92 => 94
    yolo_95 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+12},
                              {'filter': out_filters, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': layer_idx+13}],
                          skip=False)
    # Layer 95 => 98
    x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+16}],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])
    # Layer 99 => 106
    yolo_107 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+19},
                               {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+20},
                               {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+21},
                               {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+22},
                               {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+23},
                               {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+24},
                               {'filter': out_filters, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': layer_idx+25}],
                          skip=False)
    return yolo_83, yolo_95, yolo_107

# The midblock is where the spatial pyramid pooling as well as the FC block with change for the YOLOv3-SPP model are reflected
Example #22
Source Project: DigiEncoder Author: akshaybahadur21 File: Coder.py License: MIT License | 5 votes |
def decoder(self, encoded):
    decoded = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    decoded = UpSampling2D((2, 2))(decoded)
    decoded = Conv2D(8, (3, 3), activation='relu', padding='same')(decoded)
    decoded = UpSampling2D((2, 2))(decoded)
    decoded = Conv2D(16, (3, 3), activation='relu')(decoded)
    decoded = UpSampling2D((2, 2))(decoded)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoded)
    return decoded
Example #23
Source Project: ImageAI Author: OlafenwaMoses File: models.py License: MIT License | 5 votes |
def tiny_yolo_main(input, num_anchors, num_classes):

    network_1 = NetworkConv2D_BN_Leaky(input=input, channels=16, kernel_size=(3, 3))
    network_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")(network_1)
    network_1 = NetworkConv2D_BN_Leaky(input=network_1, channels=32, kernel_size=(3, 3))
    network_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")(network_1)
    network_1 = NetworkConv2D_BN_Leaky(input=network_1, channels=64, kernel_size=(3, 3))
    network_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")(network_1)
    network_1 = NetworkConv2D_BN_Leaky(input=network_1, channels=128, kernel_size=(3, 3))
    network_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")(network_1)
    network_1 = NetworkConv2D_BN_Leaky(input=network_1, channels=256, kernel_size=(3, 3))

    network_2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="same")(network_1)
    network_2 = NetworkConv2D_BN_Leaky(input=network_2, channels=512, kernel_size=(3, 3))
    network_2 = MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding="same")(network_2)
    network_2 = NetworkConv2D_BN_Leaky(input=network_2, channels=1024, kernel_size=(3, 3))
    network_2 = NetworkConv2D_BN_Leaky(input=network_2, channels=256, kernel_size=(1, 1))

    network_3 = NetworkConv2D_BN_Leaky(input=network_2, channels=512, kernel_size=(3, 3))
    network_3 = Conv2D(num_anchors * (num_classes + 5), kernel_size=(1, 1))(network_3)

    network_2 = NetworkConv2D_BN_Leaky(input=network_2, channels=128, kernel_size=(1, 1))
    network_2 = UpSampling2D(2)(network_2)

    network_4 = Concatenate()([network_2, network_1])
    network_4 = NetworkConv2D_BN_Leaky(input=network_4, channels=256, kernel_size=(3, 3))
    network_4 = Conv2D(num_anchors * (num_classes + 5), kernel_size=(1, 1))(network_4)

    return Model(input, [network_3, network_4])
Example #24
Source Project: keras-centernet Author: see-- File: hourglass.py License: MIT License | 5 votes |
def connect_left_right(left, right, num_channels, num_channels_next, name):
    # left: 2 residual modules
    left = residual(left, num_channels_next, name=name + 'skip.0')
    left = residual(left, num_channels_next, name=name + 'skip.1')

    # up: 2 times residual & nearest neighbour
    out = residual(right, num_channels, name=name + 'out.0')
    out = residual(out, num_channels_next, name=name + 'out.1')
    out = UpSampling2D(name=name + 'out.upsampleNN')(out)
    out = Add(name=name + 'out.add')([left, out])
    return out
Example #25
Source Project: MMdnn Author: microsoft File: keras2_emitter.py License: MIT License | 5 votes |
def emit_UpSampling2D(self, IR_node, in_scope=False):
    code = "{:<15} = layers.UpSampling2D(name='{}', size= ({}), data_format = 'channels_last')({})".format(
        IR_node.variable_name,
        IR_node.name,
        IR_node.get_attr('scales'),
        self.parent_variable_name(IR_node))
    return code
Example #26
Source Project: CycleGAN-Keras Author: simontomaskarlsson File: model.py License: GNU General Public License v3.0 | 5 votes |
def uk(self, x, k):
    # (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)
    if self.use_resize_convolution:
        x = UpSampling2D(size=(2, 2))(x)  # Nearest neighbor upsampling
        x = ReflectionPadding2D((1, 1))(x)
        x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid')(x)
    else:
        x = Conv2DTranspose(filters=k, kernel_size=3, strides=2, padding='same')(x)  # this matches fractionally strided with stride 1/2
    x = self.normalization(axis=3, center=True, epsilon=1e-5)(x, training=True)
    x = Activation('relu')(x)
    return x

#===============================================================================
# Models
Example #27
Source Project: coremltools Author: apple File: test_keras.py License: BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_upsample(self):
    """
    Test the conversion of 2D convolutional layer + upsample
    """
    from keras.layers import Convolution2D, UpSampling2D

    # Create a simple Keras model
    model = Sequential()
    model.add(
        Convolution2D(input_shape=(64, 64, 3), nb_filter=32, nb_row=5, nb_col=5)
    )
    model.add(UpSampling2D(size=(2, 2)))
    input_names = ["input"]
    output_names = ["output"]
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)

    # Test the model class
    self.assertIsNotNone(spec.description)
    self.assertTrue(spec.HasField("neuralNetwork"))

    # Test the inputs and outputs
    self.assertEquals(len(spec.description.input), len(input_names))
    six.assertCountEqual(
        self, input_names, [x.name for x in spec.description.input]
    )
    self.assertEquals(len(spec.description.output), len(output_names))
    six.assertCountEqual(
        self, output_names, [x.name for x in spec.description.output]
    )

    # Test the layer parameters.
    layers = spec.neuralNetwork.layers
    layer_0 = layers[0]
    self.assertIsNotNone(layer_0.convolution)
    layer_1 = layers[1]
    self.assertIsNotNone(layer_1.upsample)
Example #28
Source Project: keras_BEGAN Author: mokemokechicken File: models.py License: MIT License | 5 votes |
def convolution_image_for_decoding(x, filters, upsample=None, name=None, n_layer=2):
    for i in range(1, n_layer + 1):
        x = Convolution2D(filters, (3, 3), activation="elu", padding="same", name="%s/Conv%d" % (name, i))(x)
    if upsample:
        x = UpSampling2D()(x)
    return x
Example #29
Source Project: Pseudo-Label-Keras Author: koshian2 File: mobilenet_pseudo_cifar.py License: MIT License | 5 votes |
def create_cnn():
    net = MobileNet(input_shape=(128, 128, 3), weights=None, include_top=False)

    # upsampling (32 -> 128)
    input = Input((32, 32, 3))
    x = UpSampling2D(4)(input)
    x = net(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    model.summary()
    return model
Example #30
Source Project: Pseudo-Label-Keras Author: koshian2 File: mobilenet_transfer_pseudo_cifar.py License: MIT License | 5 votes |
def create_cnn():
    net = MobileNet(input_shape=(128, 128, 3), include_top=False)
    # train from conv_pw_6 onwards (freeze the first 41 layers)
    for i in range(41):
        net.layers[i].trainable = False

    # upsampling (32 -> 128)
    input = Input((32, 32, 3))
    x = UpSampling2D(4)(input)
    x = net(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    model.summary()
    return model