Python theano.tensor.as_tensor() Examples

The following are 13 code examples of theano.tensor.as_tensor(), collected from open-source projects. Each example lists the source file and project it comes from, along with the project's license. You may also want to check out the other available functions and classes of the theano.tensor module.
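As a quick orientation before the project examples, here is a minimal, self-contained sketch of what as_tensor() does. In Theano, as_tensor() is an alias of as_tensor_variable(): it wraps a Python scalar, list, NumPy array, or existing variable as a symbolic TensorVariable, and the optional ndim argument asserts the expected number of dimensions. The variable names below are illustrative.

import numpy
import theano
import theano.tensor as tensor

# Wrap a NumPy array as a constant 2-D tensor variable.
x = tensor.as_tensor(numpy.arange(6.).reshape(2, 3))

# Wrap a Python scalar; ndim=0 asserts the result is 0-dimensional.
n = tensor.as_tensor(5.0, ndim=0)

y = x * n
print(theano.function([], y)())  # the 2x3 array arange(6) * 5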
Example #1
Source File: fourier.py    From D-VAE with MIT License
def make_node(self, frames, n, axis):
        """
        Compute an n-point fft of frames along given axis.

        """
        _frames = tensor.as_tensor(frames, ndim=2)
        _n = tensor.as_tensor(n, ndim=0)
        _axis = tensor.as_tensor(axis, ndim=0)
        if self.half and _frames.type.dtype.startswith('complex'):
            raise TypeError('Argument to HalfFFT must not be complex', frames)
        spectrogram = tensor.zmatrix()
        buf = generic()
        # The `buf` output is present for future work
        # when we call FFTW directly and re-use the 'plan' that FFTW creates.
        # In that case, buf would store a CObject encapsulating the plan.
        rval = Apply(self, [_frames, _n, _axis], [spectrogram, buf])
        return rval 
Example #2
Source File: fourier.py    From attention-lvcsr with MIT License

The code is identical to Example #1 above (both projects ship the same fourier.py).
Example #3
Source File: aggregation.py    From attention-lvcsr with MIT License
def get_aggregator(self):
        initialized = shared_like(0.)
        expression_acc = shared_like(self.expression)

        # Dummy default expression to use as the previously-accumulated
        # value, that has the same shape as the new result
        expression_zeros = tensor.as_tensor(self.expression).zeros_like()

        conditional_update_expr = self.expression + ifelse(initialized,
                                                           expression_acc,
                                                           expression_zeros)

        initialization_updates = [(expression_acc,
                                   tensor.zeros_like(expression_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(expression_acc,
                                 conditional_update_expr),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(expression_acc))
        return aggregator 
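The conditional-initialization trick in this example can be sketched in isolation. Below is a minimal stand-alone version (not the Blocks API; names are illustrative): ifelse selects a zeros_like default on the first step, so the accumulator never needs an explicit reset before use.

import numpy
import theano
import theano.tensor as tensor
from theano.ifelse import ifelse

x = tensor.vector('x')
initialized = theano.shared(0.)   # stays 0. until the first step
acc = theano.shared(numpy.zeros(3, dtype=theano.config.floatX))

# On the first call ifelse yields zeros; afterwards, the running sum.
new_acc = x + ifelse(initialized, acc, tensor.as_tensor(x).zeros_like())
accumulate = theano.function([x], [],
                             updates=[(acc, new_acc), (initialized, 1.)])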
Example #4
Source File: theano_graph_pro.py    From gempy with GNU Lesser General Public License v3.0
def set_rest_ref_matrix(self, number_of_points_per_surface):
        ref_positions = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface[:-1] + 1)))
        cum_rep = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface)))

        ref_points_init = T.zeros((cum_rep[-1], 3))
        ref_points_loop, update_ = theano.scan(self.repeat_list,
                                               outputs_info=[ref_points_init],
                                               sequences=[self.surface_points_all[ref_positions],
                                                          dict(input=cum_rep, taps=[0, 1])],
                                               non_sequences=[T.as_tensor(3)],
                                               return_list=False)

        #   ref_points_loop = theano.printing.Print('loop')(ref_points_loop)
        ref_points = ref_points_loop[-1]
        #  ref_points = T.repeat(self.surface_points_all[ref_positions], number_of_points_per_surface, axis=0)

        rest_mask = T.ones(T.stack([self.surface_points_all.shape[0]]), dtype='int16')
        rest_mask = T.set_subtensor(rest_mask[ref_positions], 0)
        rest_mask = T.nonzero(rest_mask)[0]
        rest_points = self.surface_points_all[rest_mask]
        return [ref_points, rest_points, ref_positions, rest_mask] 
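The T.as_tensor(3) passed as a non_sequence above is how a plain Python constant enters a scan loop. A minimal sketch of that pattern on its own (toy step function, not gempy's repeat_list):

import theano
import theano.tensor as T

xs = T.vector('xs')
# Each step receives the current element of xs plus the non_sequence k.
result, _ = theano.scan(lambda x, k: x * k,
                        sequences=[xs],
                        non_sequences=[T.as_tensor(3)])
f = theano.function([xs], result)   # f([1., 2.]) -> [3., 6.]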
Example #5
Source File: theano_graph_pro.py    From gempy with GNU Lesser General Public License v3.0
def set_nugget_surface_points(self, ref_positions, rest_mask, number_of_points_per_surface):
        # ref_nugget = T.repeat(self.nugget_effect_scalar_T[ref_positions], number_of_points_per_surface)
        cum_rep = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface)))
        ref_nugget_init = T.zeros((cum_rep[-1], 1))
        ref_nugget_loop, update_ = theano.scan(self.repeat_list,
                                               outputs_info=[ref_nugget_init],
                                               sequences=[self.nugget_effect_scalar_T[ref_positions],
                                                          dict(input=cum_rep, taps=[0, 1])],
                                               non_sequences=[T.as_tensor(1)],
                                               return_list=False)

        # ref_nugget_loop = theano.printing.Print('loop')(ref_nugget_loop)
        ref_nugget = ref_nugget_loop[-1]

        rest_nugget = self.nugget_effect_scalar_T[rest_mask]
        nugget_rest_ref = ref_nugget.reshape((1, -1))[0] + rest_nugget
        return nugget_rest_ref 
Example #6
Source File: aggregation.py    From attention-lvcsr with MIT License
def get_aggregator(self):
        initialized = shared_like(0.)
        numerator_acc = shared_like(self.numerator)
        denominator_acc = shared_like(self.denominator)

        # Dummy default expression to use as the previously-accumulated
        # value, that has the same shape as the new result
        numerator_zeros = tensor.as_tensor(self.numerator).zeros_like()
        denominator_zeros = tensor.as_tensor(self.denominator).zeros_like()

        conditional_update_num = self.numerator + ifelse(initialized,
                                                         numerator_acc,
                                                         numerator_zeros)
        conditional_update_den = self.denominator + ifelse(initialized,
                                                           denominator_acc,
                                                           denominator_zeros)

        initialization_updates = [(numerator_acc,
                                   tensor.zeros_like(numerator_acc)),
                                  (denominator_acc,
                                   tensor.zeros_like(denominator_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(numerator_acc,
                                 conditional_update_num),
                                (denominator_acc,
                                 conditional_update_den),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(numerator_acc /
                                                  denominator_acc))
        return aggregator 
Example #7
Source File: integration.py    From starry with MIT License
def infer_shape(self, node, shapes):
        return [shapes[0] + (tt.as_tensor(self.N),)] 
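For context: infer_shape is the hook a custom Op implements so Theano can reason about output shapes without executing the Op. shapes[0] is the symbolic shape tuple of the first input, and tt.as_tensor(self.N) turns the fixed trailing dimension into a symbolic scalar. A minimal toy Op using the same pattern (illustrative only, not starry's Op):

import numpy
import theano.tensor as tt
from theano.gof import Op, Apply

class TileN(Op):
    """Toy Op: tiles a vector x into a (len(x), N) matrix."""
    __props__ = ('N',)

    def __init__(self, N):
        self.N = N

    def make_node(self, x):
        x = tt.as_tensor(x)
        return Apply(self, [x], [tt.matrix(dtype=x.dtype)])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = numpy.tile(x[:, None], (1, self.N))

    def infer_shape(self, node, shapes):
        # Output shape = input shape plus a trailing dimension of size N.
        return [shapes[0] + (tt.as_tensor(self.N),)]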
Example #8
Source File: integration.py    From starry with MIT License
def infer_shape(self, node, shapes):
        return [
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
        ] 
Example #9
Source File: integration.py    From starry with MIT License
def infer_shape(self, node, shapes):
        return [
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
            shapes[0] + (tt.as_tensor(self.N),),
        ] 
Example #10
Source File: sp.py    From D-VAE with MIT License
def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing images on which to apply convolution.
                   Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    N = numpy
    poolsize = N.int64(N.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if N.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = \
            convolution_indices.conv_eval(imgshp, maxpoolshp,
                                          maxpoolshp, mode='valid')

#    print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'
#    print 'imgshp = ', imgshp
#    print 'maxpoolshp = ', maxpoolshp
#    print 'outshp = ', outshp

    # build sparse matrix, then generate stack of image patches
    csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,
                                    indptr, spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T

    pshape = tensor.stack([images.shape[0] *\
                               tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0]),
                           tensor.as_tensor(poolsize)])
    patch_stack = tensor.reshape(patches, pshape, ndim=3)

    out1 = tensor.max(patch_stack, axis=2)

    pshape = tensor.stack([images.shape[0],
                           tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0])])
    out2 = tensor.reshape(out1, pshape, ndim=3)

    out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return tensor.flatten(out3, 2), outshp 
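A hypothetical invocation of the function above (shapes assumed for illustration, using the same imports as sp.py): pooling flattened 28x28 single-channel images over non-overlapping 2x2 regions.

images = tensor.matrix('images')  # batch_size x (28 * 28), flattened
pooled, outshp = max_pool(images, imgshp=(28, 28), maxpoolshp=(2, 2))
# pooled is a symbolic 2D tensor of pooled activations per image;
# outshp is the logical (feature-map) shape of the output.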
Example #11
Source File: sp.py    From attention-lvcsr with MIT License

The code is identical to Example #10 above (both projects ship the same sp.py).
Example #12
Source File: poollayer.py    From deep-prior with GNU General Public License v3.0
def __init__(self, rng, inputVar, cfgParams, copyLayer=None, layerNum=None):
        """
        Allocate a PoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type inputVar: theano.tensor.dtensor4
        :param inputVar: symbolic image tensor, of shape image_shape

        :type cfgParams: PoolLayerParams
        """

        floatX = theano.config.floatX  # @UndefinedVariable

        outputDim = cfgParams.outputDim
        poolsize = cfgParams.poolsize
        inputDim = cfgParams.inputDim
        activation = cfgParams.activation
        poolType = cfgParams.poolType

        self.cfgParams = cfgParams
        self.layerNum = layerNum

        self.inputVar = inputVar

        if inputVar.type.ndim != 4:
            raise TypeError('inputVar must be a 4D tensor')

        self.params = []
        self.weights = []

        # downsample each feature map individually, using maxpooling
        if poolType == 0:
            # use maxpooling
            pooled_out = pool_2d(input=self.inputVar, ds=poolsize, ignore_border=True)
        elif poolType == 1:
            # use average pooling
            pooled_out = theano.sandbox.neighbours.images2neibs(
                ten4=self.inputVar, neib_shape=poolsize,
                mode='ignore_borders').mean(axis=-1)
            new_shape = T.cast(
                T.join(0, self.inputVar.shape[:-2],
                       T.as_tensor([self.inputVar.shape[2] // poolsize[0]]),
                       T.as_tensor([self.inputVar.shape[3] // poolsize[1]])),
                'int64')
            pooled_out = T.reshape(pooled_out, new_shape, ndim=4)
        elif poolType == 3:
            # use subsampling and ignore border
            pooled_out = self.inputVar[:, :, :(inputDim[2]//poolsize[0])*poolsize[0], :(inputDim[3]//poolsize[1])*poolsize[1]][:, :, ::poolsize[0], ::poolsize[1]]
        elif poolType == -1:
            # no pooling at all
            pooled_out = self.inputVar
        else:
            raise ValueError("Unknown pool type!")

        self.output = (pooled_out if activation is None
                       else activation(pooled_out))

        self.output.name = 'output_layer_{}'.format(self.layerNum) 
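The average-pooling branch above is the subtlest part: images2neibs flattens every pooling window into one row, mean(axis=-1) averages each window, and T.as_tensor() wraps the computed height and width so T.join can assemble the 4-D target shape. A stand-alone sketch of the same trick, with a fixed 2x2 window and illustrative names:

import theano.tensor as T
from theano.sandbox.neighbours import images2neibs

x = T.tensor4('x')  # (batch, channels, rows, cols)
neibs = images2neibs(x, neib_shape=(2, 2), mode='ignore_borders')
avg = neibs.mean(axis=-1)                    # one mean per 2x2 window
new_shape = T.cast(T.join(0, x.shape[:-2],
                          T.as_tensor([x.shape[2] // 2]),
                          T.as_tensor([x.shape[3] // 2])), 'int64')
pooled = T.reshape(avg, new_shape, ndim=4)   # back to 4-D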
Example #13
Source File: rbm.py    From TextDetector with GNU General Public License v3.0
def updates(self, gradients):
        """
        Return symbolic updates to apply given a set of gradients
        on the parameters being optimized.

        Parameters
        ----------
        gradients : list of tensor_likes
            List of symbolic gradients for the parameters contained
            in self.params, in the same order as in self.params.

        Returns
        -------
        updates : dict
            A dictionary with the shared variables in self.params as keys
            and a symbolic expression of how they are to be updated each
            SGD step as values.

        Notes
        -----
        `cost_updates` is a convenient helper function that takes all
        necessary gradients with respect to a given symbolic cost.
        """
        ups = {}
        # Add the learning rate/iteration updates
        l_ups, learn_rates = self.learning_rate_updates(gradients)
        safe_update(ups, l_ups)

        # Get the updates from sgd_updates, a PyLearn library function.
        p_up = dict(self.sgd_updates(self.params, gradients, learn_rates))

        # Add the things in p_up to ups
        safe_update(ups, p_up)

        # Clip the values if needed.
        # We do not want the clipping values to force an upcast
        # of the update: updates should have the same type as params
        for param, (p_min, p_max) in six.iteritems(self.clipping_values):
            p_min = tensor.as_tensor(p_min)
            p_max = tensor.as_tensor(p_max)
            dtype = param.dtype
            if p_min.dtype != dtype:
                p_min = tensor.cast(p_min, dtype)
            if p_max.dtype != dtype:
                p_max = tensor.cast(p_max, dtype)
            ups[param] = tensor.clip(ups[param], p_min, p_max)

        # Return the updates dictionary.
        return ups
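The clipping loop above relies on as_tensor() plus an explicit cast so the clip bounds cannot upcast the update. The same pattern in isolation, with a toy shared parameter rather than the RBM's:

import numpy
import theano
import theano.tensor as tensor

param = theano.shared(numpy.zeros(3, dtype='float32'), name='w')
update = param + 0.1
# as_tensor() yields float64 constants here; cast them to the
# parameter's dtype so tensor.clip does not upcast the update.
p_min = tensor.cast(tensor.as_tensor(-1.0), param.dtype)
p_max = tensor.cast(tensor.as_tensor(1.0), param.dtype)
step = theano.function(
    [], [], updates={param: tensor.clip(update, p_min, p_max)})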