Python keras.backend.conv2d() Examples

The following are 30 code examples of keras.backend.conv2d(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
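For orientation, here is a minimal, self-contained call. It is a sketch assuming a TensorFlow backend and channels_last data; the shapes are made up for illustration, and kernels for K.conv2d are laid out as (rows, cols, input_channels, output_channels).

import numpy as np
from keras import backend as K

# Hypothetical data: a batch of 4 RGB images of size 32x32,
# convolved with a random 3x3 kernel producing 8 output channels.
x = K.constant(np.random.rand(4, 32, 32, 3).astype('float32'))
kernel = K.constant(np.random.rand(3, 3, 3, 8).astype('float32'))

y = K.conv2d(x, kernel,
             strides=(1, 1),
             padding='same',
             data_format='channels_last')
print(K.eval(y).shape)  # expected: (4, 32, 32, 8)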
Example #1
Source File: tensorflow_backend.py    From keras-contrib with MIT License
def _postprocess_conv2d_output(x, data_format):
    """Transpose and cast the output from conv2d if needed.

    # Arguments
        x: A tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.

    # Returns
        A tensor.
    """

    if data_format == 'channels_first':
        x = tf.transpose(x, (0, 3, 1, 2))

    if K.floatx() == 'float64':
        x = tf.cast(x, 'float64')
    return x 
Example #2
Source File: binary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H) 
        outputs = K.conv2d(
            inputs,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #3
Source File: layers.py    From FC-AIDE-Keras with MIT License
def call(self, inputs):
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #4
Source File: model.py    From 2018DSB with MIT License
def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    dy = output_y[:,2:,2:]-output_y[:, :-2,2:] + \
         2*(output_y[:,2:,1:-1]- output_y[:,:-2,1:-1]) + \
         output_y[:,2:,:-2]-output_y[:,:-2,:-2]
    dx = output_x[:,2:,2:]- output_x[:,2:,:-2] + \
         2*( output_x[:,1:-1,2:]- output_x[:,1:-1,:-2]) +\
         output_x[:,:-2,2:]- output_x[:,:-2,:-2]
    ddr=  (output_dr[:,2:,2:]-output_dr[:,:-2,:-2] +\
           output_dr[:,1:-1,2:]-output_dr[:,:-2,1:-1]+\
           output_dr[:,2:,1:-1]-output_dr[:,1:-1,:-2])*K.constant(2)
    ddl=  (output_dl[:,2:,:-2]-output_dl[:,:-2,2:] +\
           output_dl[:,2:,1:-1]-output_dl[:,1:-1,2:]+\
           output_dl[:,1:-1,:-2]-output_dl[:,:-2,1:-1])*K.constant(2)
    dpred = K.concatenate([dy,dx,ddr,ddl],axis=-1)
    dpred = K.spatial_2d_padding(dpred)
    weight_fg = K.cast(K.all(dpred>K.constant(config.GRADIENT_THRES), axis=3, 
                          keepdims=True), K.floatx())
    
    weight = K.clip(K.sqrt(weight_fg*K.prod(dpred, axis=3, keepdims=True)), 
                    config.WEIGHT_AREA/config.CLIP_AREA_HIGH, 
                    config.WEIGHT_AREA/config.CLIP_AREA_LOW)
    weight +=(1-weight_fg)*config.WEIGHT_AREA/config.BG_AREA
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
    return K.stop_gradient(weight) 
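The final smoothing in inst_weight assumes config.GAUSSIAN_KERNEL is already a 4-D array shaped (rows, cols, in_channels, out_channels) for the single-channel weight map. The project's actual values are not shown above; a generic sketch of building such a kernel (hypothetical helper, not part of the source file) could be:

import numpy as np

def gaussian_kernel_4d(size=5, sigma=1.0):
    # Build a (size, size, 1, 1) Gaussian kernel suitable for
    # K.conv2d over a single-channel map, as used above.
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    k = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return (k / k.sum()).reshape(size, size, 1, 1).astype('float32')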
Example #5
Source File: core.py    From enet-keras with MIT License
def call(self, inputs, **kwargs):
        outputs = K.conv2d(
            inputs,
            self.kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)
        outputs = BatchNormalization(momentum=self.momentum)(outputs)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #6
Source File: layers.py    From FC-AIDE-Keras with MIT License
def call(self, inputs):
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #7
Source File: qrnn.py    From nn_playground with MIT License
def preprocess_input(self, inputs, training=None):
        if self.window_size > 1:
            inputs = K.temporal_padding(inputs, (self.window_size-1, 0))
        inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

        output = K.conv2d(inputs, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')

        if self.dropout is not None and 0. < self.dropout < 1.:
            z = output[:, :, :self.units]
            f = output[:, :, self.units:2 * self.units]
            o = output[:, :, 2 * self.units:]
            f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
            return K.concatenate([z, f, o], -1)
        else:
            return output 
Example #8
Source File: layers.py    From FC-AIDE-Keras with MIT License
def call(self, inputs):
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #9
Source File: tensorflow_backend.py    From keras-contrib with MIT License
def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format='channels_first',
           image_shape=None, filter_shape=None):
    """2D convolution.

    # Arguments
        x: Input tensor
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, "same" or "valid".
        data_format: 'channels_first' or 'channels_last'.
            Whether to use Theano or TensorFlow dimension
            ordering in inputs/kernels/outputs.
        image_shape: Optional, the input tensor shape.
        filter_shape: Optional, the kernel shape.

    # Returns
        x convolved with the kernel.

    # Raises
        Exception: In case of invalid border mode or data format.
    """
    return K.conv2d(x, kernel, strides, padding, data_format) 
Example #10
Source File: tensorflow_backend.py    From keras-contrib with MIT License
def _preprocess_conv2d_input(x, data_format):
    """Transpose and cast the input before the conv2d.

    # Arguments
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.

    # Returns
        A tensor.
    """
    if K.dtype(x) == 'float64':
        x = tf.cast(x, 'float32')
    if data_format == 'channels_first':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = tf.transpose(x, (0, 2, 3, 1))
    return x 
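Examples #1 and #10 are the two halves of keras-contrib's channels_first handling: cast/transpose the input into TensorFlow's native channels_last layout, convolve, then transpose and cast back. A rough sketch of how they compose follows; the wrapper name and the raw tf.nn.conv2d call are assumptions for illustration, not taken from the file.

def _conv2d_channels_first(x, kernel, strides=(1, 1), padding='SAME'):
    # Sketch only: move to channels_last / float32, convolve with raw
    # TensorFlow, then restore the channels_first layout and dtype.
    x = _preprocess_conv2d_input(x, 'channels_first')
    x = tf.nn.conv2d(x, kernel, strides=(1,) + strides + (1,), padding=padding)
    return _postprocess_conv2d_output(x, 'channels_first')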
Example #11
Source File: qrnn.py    From embedding-as-service with MIT License
def preprocess_input(self, inputs, training=None):
        if self.window_size > 1:
            inputs = K.temporal_padding(inputs, (self.window_size - 1, 0))
        inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

        output = K.conv2d(inputs, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')

        if self.dropout is not None and 0. < self.dropout < 1.:
            z = output[:, :, :self.units]
            f = output[:, :, self.units:2 * self.units]
            o = output[:, :, 2 * self.units:]
            f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
            return K.concatenate([z, f, o], -1)
        else:
            return output 
Example #12
Source File: binary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H) 
        outputs = K.conv2d(
            inputs,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #13
Source File: ternary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        ternary_kernel = ternarize(self.kernel, H=self.H) 
        outputs = K.conv2d(
            inputs,
            ternary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #14
Source File: capslayers.py    From deepcaps with MIT License
def call(self, inputs):
        if self.r_num == 1:
            # if there is no routing (and this is so when r_num is 1 and all c are equal)
            # then this is a common convolution
            outputs = K.conv2d(K.reshape(inputs, (-1, self.h_i, self.w_i,
                                                  self.ch_i * self.n_i)),
                               K.reshape(self.w, self.kernel_size +
                                         (self.ch_i * self.n_i, self.ch_j * self.n_j)),
                               data_format='channels_last',
                               strides=self.strides,
                               padding=self.padding,
                               dilation_rate=self.dilation_rate)

            outputs = squeeze(K.reshape(outputs, ((-1, self.h_j, self.w_j,
                                                   self.ch_j, self.n_j))))

        return outputs 
Example #15
Source File: layers.py    From FC-AIDE-Keras with MIT License
def call(self, inputs):
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #16
Source File: layers.py    From FC-AIDE-Keras with MIT License
def call(self, inputs):
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #17
Source File: quantized_layers.py    From QuantizedNeuralNetworks-Keras-Tensorflow with BSD 3-Clause "New" or "Revised" License
def call(self, inputs):
        quantized_kernel = quantize(self.kernel, nb=self.nb)

        inverse_kernel_lr_multiplier = 1./self.kernel_lr_multiplier
        inputs_qnn_gradient = (inputs - (1. - 1./inverse_kernel_lr_multiplier) * K.stop_gradient(inputs))\
                  * inverse_kernel_lr_multiplier

        outputs_qnn_gradient = K.conv2d(
            inputs_qnn_gradient,
            quantized_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        outputs = (outputs_qnn_gradient - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_qnn_gradient))\
                  * self.kernel_lr_multiplier


        #outputs = outputs*K.mean(K.abs(self.kernel))

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #18
Source File: attention.py    From Coloring-greyscale-images with MIT License
def call(self, x):
        def hw_flatten(x):
            return K.reshape(x, shape=[K.shape(x)[0], K.shape(x)[1]*K.shape(x)[2], K.shape(x)[-1]])

        f = K.conv2d(x,
                     kernel=self.kernel_f,
                     strides=(1, 1), padding='same')  # [bs, h, w, c']
        f = K.bias_add(f, self.bias_f)
        g = K.conv2d(x,
                     kernel=self.kernel_g,
                     strides=(1, 1), padding='same')  # [bs, h, w, c']
        g = K.bias_add(g, self.bias_g)
        h = K.conv2d(x,
                     kernel=self.kernel_h,
                     strides=(1, 1), padding='same')  # [bs, h, w, c]
        h = K.bias_add(h, self.bias_h)

        s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)  # [bs, N, N]

        beta = K.softmax(s, axis=-1)  # attention map

        o = K.batch_dot(beta, hw_flatten(h))  # [bs, N, C]

        o = K.reshape(o, shape=K.shape(x))  # [bs, h, w, C]
        x = self.gamma * o + x

        return x 
Example #19
Source File: binary_layers.py    From QuantizedNeuralNetworks-Keras-Tensorflow with BSD 3-Clause "New" or "Revised" License
def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H)

        inverse_kernel_lr_multiplier = 1./self.kernel_lr_multiplier
        inputs_bnn_gradient = (inputs - (1. - 1./inverse_kernel_lr_multiplier) * K.stop_gradient(inputs))\
                  * inverse_kernel_lr_multiplier

        outputs_bnn_gradient = K.conv2d(
            inputs_bnn_gradient,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        outputs = (outputs_bnn_gradient - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient))\
                  * self.kernel_lr_multiplier


        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #20
Source File: CoarseNet_utils.py    From MinutiaeNet with MIT License
def ori_highest_peak(y_pred, length=180):
    glabel = gausslabel(length=length,stride=2).astype(np.float32)
    y_pred = tf.convert_to_tensor(y_pred, np.float32)
    ori_gau = K.conv2d(y_pred,glabel,padding='same')
    return ori_gau 
Example #21
Source File: CoarseNet_utils.py    From MinutiaeNet with MIT License
def orientation(image, stride=8, window=17):
    with K.tf.name_scope('orientation'):
        assert image.get_shape().as_list()[3] == 1, 'Images must be grayscale'
        strides = [1, stride, stride, 1]
        E = np.ones([window, window, 1, 1])
        sobelx = np.reshape(np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=float), [3, 3, 1, 1])
        sobely = np.reshape(np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=float), [3, 3, 1, 1])
        gaussian = np.reshape(gaussian2d((5, 5), 1), [5, 5, 1, 1])
        with K.tf.name_scope('sobel_gradient'):
            Ix = K.tf.nn.conv2d(image, sobelx, strides=[1,1,1,1], padding='SAME', name='sobel_x')
            Iy = K.tf.nn.conv2d(image, sobely, strides=[1,1,1,1], padding='SAME', name='sobel_y')
        with K.tf.name_scope('eltwise_1'):
            Ix2 = K.tf.multiply(Ix, Ix, name='IxIx')
            Iy2 = K.tf.multiply(Iy, Iy, name='IyIy')
            Ixy = K.tf.multiply(Ix, Iy, name='IxIy')
        with K.tf.name_scope('range_sum'):
            Gxx = K.tf.nn.conv2d(Ix2, E, strides=strides, padding='SAME', name='Gxx_sum')
            Gyy = K.tf.nn.conv2d(Iy2, E, strides=strides, padding='SAME', name='Gyy_sum')
            Gxy = K.tf.nn.conv2d(Ixy, E, strides=strides, padding='SAME', name='Gxy_sum')
        with K.tf.name_scope('eltwise_2'):
            Gxx_Gyy = K.tf.subtract(Gxx, Gyy, name='Gxx_Gyy')
            theta = atan2([2*Gxy, Gxx_Gyy]) + np.pi
        # two-dimensional low-pass filter: Gaussian filter here
        with K.tf.name_scope('gaussian_filter'):
            phi_x = K.tf.nn.conv2d(K.tf.cos(theta), gaussian, strides=[1,1,1,1], padding='SAME', name='gaussian_x')
            phi_y = K.tf.nn.conv2d(K.tf.sin(theta), gaussian, strides=[1,1,1,1], padding='SAME', name='gaussian_y')
            theta = atan2([phi_y, phi_x])/2
    return theta 
Example #22
Source File: LossFunctions.py    From MinutiaeNet with MIT License
def orientation_loss(y_true, y_pred, lamb=1.):
    # clip
    y_pred = K.tf.clip_by_value(y_pred, K.epsilon(), 1 - K.epsilon())
    # get ROI
    label_seg = K.sum(y_true, axis=-1, keepdims=True)
    label_seg = K.tf.cast(K.tf.greater(label_seg, 0), K.tf.float32)
    # weighted cross entropy loss
    lamb_pos, lamb_neg = 1., 1.
    logloss = lamb_pos*y_true*K.log(y_pred)+lamb_neg*(1-y_true)*K.log(1-y_pred)
    logloss = logloss*label_seg # apply ROI
    logloss = -K.sum(logloss) / (K.sum(label_seg) + K.epsilon())

    # coherence loss: nearby orientations should be as close as possible
    # Orientation coherence loss

    # 3x3 ones kernel
    mean_kernal = np.reshape(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32)/8, [3, 3, 1, 1])

    sin2angle_ori, cos2angle_ori, modulus_ori = ori2angle(y_pred)
    sin2angle = K.conv2d(sin2angle_ori, mean_kernal, padding='same')
    cos2angle = K.conv2d(cos2angle_ori, mean_kernal, padding='same')
    modulus = K.conv2d(modulus_ori, mean_kernal, padding='same')

    coherence = K.sqrt(K.square(sin2angle) + K.square(cos2angle)) / (modulus + K.epsilon())
    coherenceloss = K.sum(label_seg) / (K.sum(coherence*label_seg) + K.epsilon()) - 1
    loss = logloss + lamb*coherenceloss
    return loss 
Example #23
Source File: LossFunctions.py    From MinutiaeNet with MIT License
def segmentation_loss(y_true, y_pred, lamb=1.):
    # clip
    y_pred = K.tf.clip_by_value(y_pred, K.epsilon(), 1 - K.epsilon())
    # weighted cross entropy loss
    total_elements = K.sum(K.tf.ones_like(y_true))
    label_pos = K.tf.cast(K.tf.greater(y_true, 0.0), K.tf.float32)
    lamb_pos = 0.5 * total_elements / K.sum(label_pos)
    lamb_neg = 1 / (2 - 1/lamb_pos)
    logloss = lamb_pos*y_true*K.log(y_pred)+lamb_neg*(1-y_true)*K.log(1-y_pred)
    logloss = -K.mean(K.sum(logloss, axis=-1))
    # smooth loss
    smooth_kernal = np.reshape(np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=np.float32)/8, [3, 3, 1, 1])
    smoothloss = K.mean(K.abs(K.conv2d(y_pred, smooth_kernal)))
    loss = logloss + lamb*smoothloss
    return loss 
Example #24
Source File: qrnn.py    From qrnn with MIT License
def preprocess_input(self, x):
        if self.bias:
            weights = zip(self.trainable_weights[0:3], self.trainable_weights[3:])
        else:
            weights = self.trainable_weights

        if self.window_size > 1:
            x = K.asymmetric_temporal_padding(x, self.window_size-1, 0)
        x = K.expand_dims(x, 2)  # add a dummy dimension

        # z, f, o
        outputs = []
        for param in weights:
            if self.bias:
               W, b = param
            else:
               W = param
            output = K.conv2d(x, W, strides=self.subsample,
                              border_mode='valid',
                              dim_ordering='tf')
            output = K.squeeze(output, 2)  # remove the dummy dimension
            if self.bias:
                output += K.reshape(b, (1, 1, self.output_dim))

            outputs.append(output)

        if self.dropout is not None and 0. < self.dropout < 1.:
            f = K.sigmoid(outputs[1])
            outputs[1] = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f)

        return K.concatenate(outputs, 2) 
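Note that this example uses the Keras 1 backend API (asymmetric_temporal_padding, border_mode, dim_ordering, subsample). Under Keras 2 the same padded convolution is spelled as in Examples #7 and #11, roughly:

# Keras 2 equivalent of the padded convolution above (sketch)
x = K.temporal_padding(x, (self.window_size - 1, 0))
x = K.expand_dims(x, 2)
output = K.conv2d(x, W, strides=self.subsample,
                  padding='valid',
                  data_format='channels_last')
output = K.squeeze(output, 2)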
Example #25
Source File: capsule_layers.py    From SegCaps with Apache License 2.0
def call(self, input_tensor, training=None):

        input_transposed = tf.transpose(input_tensor, [3, 0, 1, 2, 4])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[0] * input_shape[1], self.input_height, self.input_width, self.input_num_atoms])
        input_tensor_reshaped.set_shape((None, self.input_height, self.input_width, self.input_num_atoms))

        conv = K.conv2d(input_tensor_reshaped, self.W, (self.strides, self.strides),
                        padding=self.padding, data_format='channels_last')

        votes_shape = K.shape(conv)
        _, conv_height, conv_width, _ = conv.get_shape()

        votes = K.reshape(conv, [input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
                                 self.num_capsule, self.num_atoms])
        votes.set_shape((None, self.input_num_capsule, conv_height.value, conv_width.value,
                         self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2], self.num_capsule])
        biases_replicated = K.tile(self.b, [conv_height.value, conv_width.value, 1, 1])

        activations = update_routing(
            votes=votes,
            biases=biases_replicated,
            logit_shape=logit_shape,
            num_dims=6,
            input_dim=self.input_num_capsule,
            output_dim=self.num_capsule,
            num_routing=self.routings)

        return activations 
Example #26
Source File: gram.py    From subjective-functions with MIT License
def reduce_layer(a=0.4, padding_mode='valid'):
    # A 5-tap Gaussian pyramid generating kernel from Burt & Adelson 1983.
    kernel_1d = [0.25 - a/2, 0.25, a, 0.25, 0.25 - a/2]
    
    #kernel_2d = np.outer(kernel_1d, kernel_1d)
    
    # This doesn't seem very computationally bright; but there you have it.
    #kernel_4d = np.zeros((5, 5, 3, 3), 'float32')
    #kernel_4d[:,:,0,0] = kernel_2d
    #kernel_4d[:,:,1,1] = kernel_2d
    #kernel_4d[:,:,2,2] = kernel_2d
    kernel_3d = np.zeros((5, 1, 3, 3), 'float32')
    kernel_3d[:, 0, 0, 0] = kernel_1d
    kernel_3d[:, 0, 1, 1] = kernel_1d
    kernel_3d[:, 0, 2, 2] = kernel_1d

    def fn(x):
        return K.conv2d(K.conv2d(x, kernel_3d, strides=(2,1)),
                K.permute_dimensions(kernel_3d, (1, 0, 2, 3)),
                strides = (1, 2))
    
    def shape(input_shape):
        assert len(input_shape) == 4
        assert K.image_data_format() == 'channels_last'
        space = input_shape[1:-1]
        new_space = []
        for i, dim in enumerate(space):
            new_dim = conv_utils.conv_output_length(
                dim,
                5,
                padding=padding_mode,
                stride=2)
            new_space.append(new_dim)
        return (input_shape[0],) + tuple(new_space) + (input_shape[3],)
    
    return Lambda(fn, shape) 
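A possible usage of the returned layer (assuming channels_last input with exactly 3 channels, which the kernel built above requires):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 64, 3))
out = reduce_layer()(inp)      # separable 5-tap blur, downsampled by 2
model = Model(inp, out)
print(model.output_shape)      # (None, 30, 30, 3) with 'valid' padding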
Example #27
Source File: weight_norm_layers.py    From nn_playground with MIT License
def call(self, x):
        kernel = self.kernel * self.g / K.sqrt(K.sum(K.square(self.kernel), axis=[0, 1, 2], keepdims=True))
        output = K.conv2d(x, kernel, strides=self.strides,
                          padding=self.padding,
                          data_format=self.data_format)
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format=self.data_format)
        if self.activation is not None:
            output = self.activation(output)
        return output 
Example #28
Source File: attn_utils.py    From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0
def gather_indices_2d(x, block_shape, block_stride):
    kernel = K.eye(block_shape[0]*block_shape[1])
    #kernel = K.reshape(kernel, [block_shape[0], block_shape[1], 1, block_shape[0]*block_shape[1]])
    kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])

    x_shape = K.shape(x)
    indices = K.arange(x_shape[2]*x_shape[3])
    indices = K.reshape(indices, [1, x_shape[2], x_shape[3], 1])    
    indices = K.conv2d(tf.cast(indices, tf.float32), kernel, strides=(block_stride[0], block_stride[1]))

    i_shape = K.shape(indices)[:3]
    n_blocks = tf.reduce_prod(i_shape)
    indices = K.reshape(indices, [n_blocks, -1])
    return tf.cast(indices, tf.int32) 
Example #29
Source File: patches.py    From keras-rtst with MIT License
def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    # we want cross-correlation here so flip the kernels
    convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax 
Example #30
Source File: conv.py    From deep_complex_networks with MIT License
def call(self, inputs):
        input_shape = K.shape(inputs)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        ker_shape = self.kernel_size + (input_dim, self.filters)
        nb_kernels = ker_shape[-2] * ker_shape[-1]
        kernel_shape_4_norm = (np.prod(self.kernel_size), nb_kernels)
        reshaped_kernel = K.reshape(self.kernel, kernel_shape_4_norm)
        normalized_weight = K.l2_normalize(reshaped_kernel, axis=0, epsilon=self.epsilon)
        normalized_weight = K.reshape(self.gamma, (1, ker_shape[-2] * ker_shape[-1])) * normalized_weight
        shaped_kernel = K.reshape(normalized_weight, ker_shape)
        shaped_kernel._keras_shape = ker_shape
        
        convArgs = {"strides":       self.strides[0]       if self.rank == 1 else self.strides,
                    "padding":       self.padding,
                    "data_format":   self.data_format,
                    "dilation_rate": self.dilation_rate[0] if self.rank == 1 else self.dilation_rate}
        convFunc = {1: K.conv1d,
                    2: K.conv2d,
                    3: K.conv3d}[self.rank]
        output = convFunc(inputs, shaped_kernel, **convArgs)

        if self.use_bias:
            output = K.bias_add(
                output,
                self.bias,
                data_format=self.data_format
            )

        if self.activation is not None:
            output = self.activation(output)

        return output