Python theano.tensor.squeeze() Examples

The following are 13 code examples of theano.tensor.squeeze(), drawn from open-source projects. The source file, project, and license for each example are listed above its code. You may also want to check out the other functions and classes available in the theano.tensor module.
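Before the examples, a minimal sketch of what T.squeeze itself does (assuming Theano is installed; the variable names are illustrative). T.squeeze removes every dimension that is marked broadcastable, i.e. every dimension Theano knows at compile time to have length 1:

import numpy as np
import theano
import theano.tensor as T

x = T.row('x')                 # 2-D tensor with broadcastable pattern (True, False)
y = T.squeeze(x)               # drops all broadcastable dimensions
f = theano.function([x], y)

print(x.ndim, y.ndim)                        # 2 1
print(f(np.ones((1, 3), dtype=x.dtype)))     # [1. 1. 1.]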
Example #1
Source File: theano_backend.py    From reading-text-in-the-wild with GNU General Public License v3.0
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    # Mark `axis` as broadcastable (Theano asserts its length is 1 at run time)
    # so that T.squeeze, which drops broadcastable dimensions, removes it.
    x = T.addbroadcast(x, axis)
    return T.squeeze(x)
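A usage sketch of this wrapper (assuming Theano is installed and the squeeze function above is in scope; names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')            # no dimension is marked broadcastable
y = squeeze(x, 1)             # the wrapper above: addbroadcast + T.squeeze
f = theano.function([x], y)
print(f(np.zeros((2, 1, 3), dtype=x.dtype)).shape)   # (2, 3)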
Example #2
Source File: layers_lscnn.py    From ShapeNet with GNU General Public License v3.0
def fwd_old(self, x, V, A, L):
    """
    x : signal
    V : eigenvectors
    A : area 
    L : eigenvalues
    """
    V = V[:,:self.K]
    L = L[:self.K]

    sampleLoc = (L.dimshuffle(0,'x') - self.evalSamples.dimshuffle('x',0)) / self.dEval
    basis = self.cubicBSpline(sampleLoc)
    basis = basis.dimshuffle('x',0,1)

    rho = T.sqrt(T.sum(A))

    # weight the basis columns for each input function to generate a ghat
    # Q x K, a window for each input function
    ghat = T.squeeze(T.batched_dot(
            T.tile(basis, [self.nin, 1, 1]), 
            self.beta)[:,:,0]) # crazy stuff here, why doesn't squeeze work?
    # Q x K x N
    V_ = T.tile(V.dimshuffle('x',1,0), [self.nin, 1, 1])
    # Q x K x N
    tmp = (ghat.dimshuffle(0,'x',1) * V).dimshuffle(0,2,1)
    # Q x N x N
    transl = rho * T.batched_dot(V_.dimshuffle(0,2,1), tmp)
    transl = A.dimshuffle('x',0,'x') * transl
    # Q x K x N
    tmp = (V.dimshuffle(0,'x',1) * x.dimshuffle(0,1,'x')).dimshuffle(1,2,0)
    # Q x K x N
    desc = rho * T.batched_dot(tmp, transl)
    desc = T.abs_(desc)
    
    desc = desc.dimshuffle(2,0,'x',1) # BC01 format : N x Q x 1 x K
    return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b) 
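A note on the "why doesn't squeeze work?" comment above: T.squeeze only removes dimensions whose broadcastable flag is True at compile time; a dimension that merely happens to have length 1 at run time is left alone, which is why the code falls back to explicit [:, :, 0] indexing. A minimal illustration (assuming Theano is installed):

import theano.tensor as T

a = T.tensor3('a')          # broadcastable pattern (False, False, False)
print(T.squeeze(a).ndim)    # 3 -- nothing removed, even if some dim is 1 at run time

b = T.patternbroadcast(a, (False, False, True))
print(T.squeeze(b).ndim)    # 2 -- the flagged dimension is removed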
Example #3
Source File: layers_lscnn.py    From ShapeNet with GNU General Public License v3.0
def fwd(self, x, V, A, L):
    """
    x : signal
    V : eigenvectors
    A : area 
    L : eigenvalues
    """
    V = V[:,:self.K]
    L = L[:self.K]
    
    sampleLoc = (L.dimshuffle(0,'x') - self.evalSamples.dimshuffle('x',0)) / self.dEval
    basis = self.cubicBSpline(sampleLoc)
    basis = basis.dimshuffle('x',0,1)
  
    rho = T.sqrt(T.sum(A))
  
    def step(f, beta,   rho, A, V):
      ghat = T.dot(basis, beta.squeeze()).flatten()
      transl = rho * T.dot(V, ghat.dimshuffle(0,'x') * V.T)
      return rho * T.dot((V * f.dimshuffle(0,'x')).T, A.dimshuffle(0,'x') * transl)    # N x K
  
    desc, _ = theano.scan(fn=step, non_sequences=[rho,A,V], 
        sequences=[x.T,self.beta])
    desc = desc.dimshuffle(2,0,'x',1) # BC01 format : N x Q x 1 x K
    desc = T.abs_(desc)
    return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b) 
Example #4
Source File: theano_backend.py    From keras-lambda with MIT License
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".
    """
    # Drop `axis` from the symbolic run-time shape and reshape; unlike the
    # addbroadcast-based variant, this does not require the dimension to be
    # marked broadcastable at compile time.
    shape = list(x.shape)
    shape.pop(axis)
    y = T.reshape(x, tuple(shape))
    # Keep Keras' static shape metadata in sync when it is available.
    if hasattr(x, '_keras_shape'):
        kshape = list(x._keras_shape)
        kshape.pop(axis)
        y._keras_shape = tuple(kshape)
    return y
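A quick check of this reshape-based variant (same assumptions and illustrative names as above): because it reads the symbolic run-time shape, it does not depend on the compile-time broadcastable flags at all:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')
y = squeeze(x, 1)             # the reshape-based wrapper above
f = theano.function([x], y)
print(f(np.zeros((4, 1, 6), dtype=x.dtype)).shape)   # (4, 6)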
Example #5
Source File: theano_backend.py    From KerasNeuralFingerprint with MIT License
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    # Remember the broadcast pattern of the dimensions that remain,
    broadcastable = x.broadcastable[:axis] + x.broadcastable[axis+1:]
    # mark only `axis` as broadcastable so T.squeeze removes just that dimension,
    x = T.patternbroadcast(x, [i == axis for i in range(x.type.ndim)])
    x = T.squeeze(x)
    # then restore the original pattern on the result.
    x = T.patternbroadcast(x, broadcastable)
    return x
Example #6
Source File: theano_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".
    """
    shape = list(x.shape)
    shape.pop(axis)
    y = T.reshape(x, tuple(shape))
    if hasattr(x, '_keras_shape'):
        kshape = list(x._keras_shape)
        kshape.pop(axis)
        y._keras_shape = tuple(kshape)
    return y 
Example #7
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".
    """
    shape = list(x.shape)
    shape.pop(axis)
    y = T.reshape(x, tuple(shape))
    if hasattr(x, '_keras_shape'):
        kshape = list(x._keras_shape)
        kshape.pop(axis)
        y._keras_shape = tuple(kshape)
    return y 
Example #8
Source File: theano_backend.py    From GraphicDesignPatternByPython with MIT License
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".
    """
    shape = list(x.shape)
    shape.pop(axis)
    y = T.reshape(x, tuple(shape))
    if hasattr(x, '_keras_shape'):
        kshape = list(x._keras_shape)
        kshape.pop(axis)
        y._keras_shape = tuple(kshape)
    return y 
Example #9
Source File: theano_backend.py    From Att-ChemdNER with Apache License 2.0
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    # TODO: `keras_shape` inference.
    shape = list(x.shape)
    shape.pop(axis)
    return T.reshape(x, tuple(shape)) 
Example #10
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first"
        dilation_rate: integer.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ', data_format)

    if hasattr(kernel, '_keras_shape'):
        kernel_shape = kernel._keras_shape
    else:
        kernel_shape = None
    if padding == 'causal':
        # causal (dilated) convolution:
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    if hasattr(x, '_keras_shape'):
        shape = x._keras_shape
    else:
        shape = None
    if data_format == 'channels_last':
        # original shape: (batch, length, input_dim)
        # add dim to x to have (batch, length, 1, input_dim)
        x = expand_dims(x, 2)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], 1, shape[2])
    else:
        # original shape: (batch, input_dim, length)
        # add dim to x to have (batch, input_dim, length, 1)
        x = expand_dims(x, 3)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], shape[2], 1)
    # update dilation rate, strides
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # add dim to kernel (always same format independently of data_format)
    # i.e. (rows, 1, input_depth, depth)
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel,
                    strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # remove added dim
    if data_format == 'channels_last':
        output = squeeze(output, 2)
    else:
        output = squeeze(output, 3)
    return output 
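A usage sketch of this backend function through the public Keras API (assuming Keras 2.x with the Theano backend; shapes and names are illustrative). Internally the input and kernel gain an extra dimension, a 2-D convolution is run, and the added dimension is squeezed away again, so the result stays 3-D:

import numpy as np
from keras import backend as K

x = K.placeholder((None, 8, 4))                 # (batch, length, input_dim)
kernel = K.variable(np.random.rand(3, 4, 5))    # (kernel_size, input_dim, filters)
y = K.conv1d(x, kernel, strides=1, padding='valid', data_format='channels_last')

f = K.function([x], [y])
out = f([np.random.rand(2, 8, 4).astype(K.floatx())])[0]
print(out.shape)                                # (2, 6, 5)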
Example #11
Source File: theano_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first"
        dilation_rate: integer.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ', data_format)

    if hasattr(kernel, '_keras_shape'):
        kernel_shape = kernel._keras_shape
    else:
        kernel_shape = None
    if padding == 'causal':
        # causal (dilated) convolution:
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    if hasattr(x, '_keras_shape'):
        shape = x._keras_shape
    else:
        shape = None
    if data_format == 'channels_last':
        # original shape: (batch, length, input_dim)
        # add dim to x to have (batch, length, 1, input_dim)
        x = expand_dims(x, 2)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], 1, shape[2])
    else:
        # original shape: (batch, input_dim, length)
        # add dim to x to have (batch, input_dim, length, 1)
        x = expand_dims(x, 3)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], shape[2], 1)
    # update dilation rate, strides
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # add dim to kernel (always same format independently of data_format)
    # i.e. (rows, 1, input_depth, depth)
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel,
                    strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # remove added dim
    if data_format == 'channels_last':
        output = squeeze(output, 2)
    else:
        output = squeeze(output, 3)
    return output 
Example #12
Source File: theano_backend.py    From GraphicDesignPatternByPython with MIT License
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first"
        dilation_rate: integer.
    """
    data_format = normalize_data_format(data_format)

    kernel_shape = int_shape(kernel)
    if padding == 'causal':
        # causal (dilated) convolution:
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    shape = int_shape(x)
    if data_format == 'channels_last':
        # original shape: (batch, length, input_dim)
        # add dim to x to have (batch, length, 1, input_dim)
        x = expand_dims(x, 2)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], 1, shape[2])
    else:
        # original shape: (batch, input_dim, length)
        # add dim to x to have (batch, input_dim, length, 1)
        x = expand_dims(x, 3)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], shape[2], 1)
    # update dilation rate, strides
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # add dim to kernel (always same format independently of data_format)
    # i.e. (rows, 1, input_depth, depth)
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel,
                    strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # remove added dim
    if data_format == 'channels_last':
        output = squeeze(output, 2)
    else:
        output = squeeze(output, 3)
    return output 
Example #13
Source File: theano_backend.py    From keras-lambda with MIT License
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first"
        dilation_rate: integer.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ', data_format)

    if hasattr(kernel, '_keras_shape'):
        kernel_shape = kernel._keras_shape
    else:
        kernel_shape = None
    if padding == 'causal':
        # causal (dilated) convolution:
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    if hasattr(x, '_keras_shape'):
        shape = x._keras_shape
    else:
        shape = None
    if data_format == 'channels_last':
        # original shape: (batch, length, input_dim)
        # add dim to x to have (batch, length, 1, input_dim)
        x = expand_dims(x, 2)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], 1, shape[2])
    else:
        # original shape: (batch, input_dim, length)
        # add dim to x to have (batch, input_dim, length, 1)
        x = expand_dims(x, 3)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], shape[2], 1)
    # update dilation rate, strides
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # add dim to kernel (always same format independently of data_format)
    # i.e. (rows, 1, input_depth, depth)
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel,
                    strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # remove added dim
    if data_format == 'channels_last':
        output = squeeze(output, 2)
    else:
        output = squeeze(output, 3)
    return output
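For the 'causal' branch above, the input is left-padded by dilation_rate * (kernel_size - 1) before a 'valid' convolution, so the output keeps the input length and each position only sees current and past inputs. A sketch under the same assumptions as the earlier Keras snippet (illustrative shapes):

import numpy as np
from keras import backend as K

x = K.placeholder((None, 8, 4))                 # (batch, length, input_dim)
kernel = K.variable(np.random.rand(3, 4, 5))    # (kernel_size, input_dim, filters)
y = K.conv1d(x, kernel, padding='causal', data_format='channels_last')

f = K.function([x], [y])
print(f([np.random.rand(2, 8, 4).astype(K.floatx())])[0].shape)   # (2, 8, 5) -- length preserved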