Python theano.tensor.stack() Examples

The following are 30 code examples of theano.tensor.stack(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
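Before the project examples, a minimal, self-contained sketch of what theano.tensor.stack() does may help: it joins a sequence of tensors along a new axis (axis 0 by default), so two tensors of shape (3,) become one tensor of shape (2, 3). The variable names below are illustrative only, not taken from any of the listed projects.

import numpy as np
import theano
import theano.tensor as T

a = T.vector('a')
b = T.vector('b')

# stack() inserts a new axis: two (3,) vectors become one (2, 3) matrix.
m = T.stack([a, b], axis=0)

f = theano.function([a, b], m)
print(f(np.arange(3.0), np.ones(3)))
# [[ 0.  1.  2.]
#  [ 1.  1.  1.]]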
Example #1
Source File: fourier.py    From D-VAE with MIT License
def infer_shape(self, node, in_shapes):
        shape_a = in_shapes[0]
        n = node.inputs[1]
        axis = node.inputs[2]
        if len(shape_a) == 1:
            return [(n,)]
        elif isinstance(axis, tensor.TensorConstant):
            out_shape = (list(shape_a[0: axis.data.item()]) + [n] +
                         list(shape_a[axis.data + 1:]))
        else:
            l = len(shape_a)
            shape_a = tensor.stack(shape_a)
            out_shape = tensor.concatenate((shape_a[0: axis], [n],
                                            shape_a[axis + 1:]))
            n_splits = [1] * l
            out_shape = tensor.split(out_shape, n_splits, l)
            out_shape = [a[0] for a in out_shape]
        return [out_shape] 
Example #2
Source File: basic.py    From attention-lvcsr with MIT License
def grad(self, inputs, gout):
        (gz,) = gout
        is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
                         for i in range(len(inputs))]

        if _is_sparse_variable(gz):
            gz = dense_from_sparse(gz)

        split = tensor.Split(len(inputs))(gz, 1,
                                          tensor.stack(
                                              [x.shape[1]
                                               for x in inputs]))
        if not isinstance(split, list):
            split = [split]

        derivative = [SparseFromDense(self.format)(s) for s in split]

        def choose(continuous, derivative):
            if continuous:
                return derivative
            else:
                return None
        return [choose(c, d) for c, d in zip(is_continuous, derivative)] 
Example #3
Source File: basic.py    From attention-lvcsr with MIT License
def grad(self, inputs, gout):
        (gz,) = gout
        is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
                         for i in range(len(inputs))]

        if _is_sparse_variable(gz):
            gz = dense_from_sparse(gz)

        split = tensor.Split(len(inputs))(gz, 0,
                                          tensor.stack(
                                              [x.shape[0]
                                               for x in inputs]))
        if not isinstance(split, list):
            split = [split]

        derivative = [SparseFromDense(self.format)(s) for s in split]

        def choose(continuous, derivative):
            if continuous:
                return derivative
            else:
                return None
        return [choose(c, d) for c, d in zip(is_continuous, derivative)] 
Example #4
Source File: fourier.py    From attention-lvcsr with MIT License
def infer_shape(self, node, in_shapes):
        shape_a = in_shapes[0]
        n = node.inputs[1]
        axis = node.inputs[2]
        if len(shape_a) == 1:
            return [(n,)]
        elif isinstance(axis, tensor.TensorConstant):
            out_shape = (list(shape_a[0: axis.data.item()]) + [n] +
                         list(shape_a[axis.data + 1:]))
        else:
            l = len(shape_a)
            shape_a = tensor.stack(shape_a)
            out_shape = tensor.concatenate((shape_a[0: axis], [n],
                                            shape_a[axis + 1:]))
            n_splits = [1] * l
            out_shape = tensor.split(out_shape, n_splits, l)
            out_shape = [a[0] for a in out_shape]
        return [out_shape] 
Example #5
Source File: Model4DistancePrediction.py    From RaptorX-Contact with GNU General Public License v3.0
def errors4one(self, z, out, weight=None, distLabelType='12C'):
        distBins = config.distCutoffs[distLabelType]
        label8 = DistanceUtils.LabelsOfOneDistance(config.ContactDefinition, distBins)
        label15 = DistanceUtils.LabelsOfOneDistance(config.InteractionLimit, distBins)

        z3C = T.cast( T.ge(z, label8), 'int32') + T.cast( T.ge(z, label15), 'int32')
        o3C = T.cast( T.ge(out, label8), 'int32') + T.cast( T.ge(out, label15), 'int32')

        if weight is not None:
            err = T.sum( T.mul(weight, T.neq(o3C, z3C) ) )*1./T.sum(weight)
        else:
            err = T.mean( T.neq(o3C, z3C) )

        ## err is a scalar; convert it to a tensor with ndim=1
        return T.stack([err])

    ## this function returns a vector of errors; its size equals the sum of ValueDims over all the responses
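Note on Example #5: T.stack([err]) is used purely to promote the scalar err to a 1-d tensor of length 1, so that the per-response errors can later be joined into the single error vector the trailing comment describes.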
Example #6
Source File: dmn_batch.py    From Dynamic-memory-networks-in-Theano with MIT License
def new_episode(self, mem):
        g, g_updates = theano.scan(fn=self.new_attention_step,
            sequences=self.inp_c,
            non_sequences=[mem, self.q_q],
            outputs_info=T.zeros_like(self.inp_c[0][0])) 
        
        if (self.normalize_attention):
            g = nn_utils.softmax(g)
        
        e, e_updates = theano.scan(fn=self.new_episode_step,
            sequences=[self.inp_c, g],
            outputs_info=T.zeros_like(self.inp_c[0]))
        
        e_list = []
        for index in range(self.batch_size):
            e_list.append(e[self.fact_count_var[index] - 1, :, index])
        return T.stack(e_list).dimshuffle((1, 0)) 
Example #7
Source File: theano_graph_pro.py    From gempy with GNU Lesser General Public License v3.0
def set_nugget_surface_points(self, ref_positions, rest_mask, number_of_points_per_surface):
        # ref_nugget = T.repeat(self.nugget_effect_scalar_T[ref_positions], number_of_points_per_surface)
        cum_rep = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface)))
        ref_nugget_init = T.zeros((cum_rep[-1], 1))
        ref_nugget_loop, update_ = theano.scan(self.repeat_list,
                                               outputs_info=[ref_nugget_init],
                                               sequences=[self.nugget_effect_scalar_T[ref_positions],
                                                          dict(input=cum_rep, taps=[0, 1])],
                                               non_sequences=[T.as_tensor(1)],
                                               return_list=False)

        # ref_nugget_loop = theano.printing.Print('loop')(ref_nugget_loop)
        ref_nugget = ref_nugget_loop[-1]

        rest_nugget = self.nugget_effect_scalar_T[rest_mask]
        nugget_rest_ref = ref_nugget.reshape((1, -1))[0] + rest_nugget
        return nugget_rest_ref 
Example #8
Source File: theano_graph_pro.py    From gempy with GNU Lesser General Public License v3.0
def set_rest_ref_matrix(self, number_of_points_per_surface):
        ref_positions = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface[:-1] + 1)))
        cum_rep = T.cumsum(T.concatenate((T.stack([0]), number_of_points_per_surface)))

        ref_points_init = T.zeros((cum_rep[-1], 3))
        ref_points_loop, update_ = theano.scan(self.repeat_list,
                                               outputs_info=[ref_points_init],
                                               sequences=[self.surface_points_all[ref_positions],
                                                          dict(input=cum_rep, taps=[0, 1])],
                                               non_sequences=[T.as_tensor(3)],

                                               return_list=False)

        #   ref_points_loop = theano.printing.Print('loop')(ref_points_loop)
        ref_points = ref_points_loop[-1]
        #  ref_points = T.repeat(self.surface_points_all[ref_positions], number_of_points_per_surface, axis=0)

        rest_mask = T.ones(T.stack([self.surface_points_all.shape[0]]), dtype='int16')
        rest_mask = T.set_subtensor(rest_mask[ref_positions], 0)
        rest_mask = T.nonzero(rest_mask)[0]
        rest_points = self.surface_points_all[rest_mask]
        return [ref_points, rest_points, ref_positions, rest_mask] 
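In Examples #7 and #8, T.stack([0]) builds a length-1 constant tensor so that T.concatenate can prepend a leading zero to the cumulative sums; the resulting cum_rep then feeds scan a (start, end) index pair through dict(input=cum_rep, taps=[0, 1]).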
Example #9
Source File: basic.py    From D-VAE with MIT License
def grad(self, inputs, gout):
        (gz,) = gout
        is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
                         for i in range(len(inputs))]

        if _is_sparse_variable(gz):
            gz = dense_from_sparse(gz)

        split = tensor.Split(len(inputs))(gz, 0,
                                          tensor.stack(
                                              [x.shape[0]
                                               for x in inputs]))
        if not isinstance(split, list):
            split = [split]

        derivative = [SparseFromDense(self.format)(s) for s in split]

        def choose(continuous, derivative):
            if continuous:
                return derivative
            else:
                return None
        return [choose(c, d) for c, d in zip(is_continuous, derivative)] 
Example #10
Source File: basic.py    From D-VAE with MIT License
def grad(self, inputs, gout):
        (gz,) = gout
        is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
                         for i in range(len(inputs))]

        if _is_sparse_variable(gz):
            gz = dense_from_sparse(gz)

        split = tensor.Split(len(inputs))(gz, 1,
                                          tensor.stack(
                                              [x.shape[1]
                                               for x in inputs]))
        if not isinstance(split, list):
            split = [split]

        derivative = [SparseFromDense(self.format)(s) for s in split]

        def choose(continuous, derivative):
            if continuous:
                return derivative
            else:
                return None
        return [choose(c, d) for c, d in zip(is_continuous, derivative)] 
Example #11
Source File: theano_util.py    From cpae with MIT License
def parameter_stats(parameters, algorithm):
    vars_ = []
    for name, param in parameters.items():
        num_elements = numpy.product(param.get_value().shape)
        norm = param.norm(2) / num_elements ** 0.5
        trained_param = param in algorithm.gradients
        if trained_param:
            grad_norm = algorithm.gradients[param].norm(2) / num_elements ** 0.5
            step_norm = algorithm.steps[param].norm(2) / num_elements ** 0.5
            relative_step_norm = step_norm / grad_norm
        else:
            grad_norm = 0.
            step_norm = 0.
            relative_step_norm = 0.
        stats = tensor.stack(norm, grad_norm, step_norm, relative_step_norm)
        stats.name = name + '_stats'
        vars_.append(stats)
    return vars_ 
Example #12
Source File: theano_backend.py    From KerasNeuralFingerprint with MIT License
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Compute mean and std for batch then apply batch_normalization on batch.
    '''
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var 
Example #13
Source File: mujoco_costs.py    From adversarial-policies with MIT License
def __init__(self):
        def f(x, u, i, terminal):
            if terminal:
                ctrl_cost = T.zeros_like(x[..., 0])
            else:
                ctrl_cost = T.square(u).sum(axis=-1)

            # x: (batch_size, 8)
            # x[..., 0:4]: qpos
            # x[..., 4:8]: qvel, time derivatives of qpos, not used in the cost.
            theta = x[..., 0]  # qpos[0]: angle of joint 0
            phi = x[..., 1]  # qpos[1]: angle of joint 1
            target_xpos = x[..., 2:4]  # qpos[2:4], target x & y coordinate
            body1_xpos = 0.1 * T.stack([T.cos(theta), T.sin(theta)], axis=1)
            tip_xpos_incr = 0.11 * T.stack([T.cos(phi), T.sin(phi)], axis=1)
            tip_xpos = body1_xpos + tip_xpos_incr
            delta = tip_xpos - target_xpos

            state_cost = T.sqrt(T.sum(delta * delta, axis=-1))
            cost = state_cost + ctrl_cost

            return cost

        super().__init__(f, state_size=8, action_size=2) 
Example #14
Source File: cbc_hb.py    From lifestyles with MIT License
def _create_observation_variable(individual_selections, choices, partsworth):
    """
    This function handles creating the PyMC3 observation variables.  It also gracefully handles missing observations in individual selections.

    `individual_selections` is a Series of the individual's selections, starting from 0. It can contain NaNs, which indicate that no answer was provided.

    `choices` is a DataFrame with a hierarchical index: level=0 enumerates the choices, and level=1 displays the profile at a specific choice.
    Its size is (n_questions, n_choices_per_question).

    `partsworth` is a slice of a PyMC3 matrix. It represents the partsworth variables of an individual. Size is (n_profiles,)

    This computes the values exp(partsworth * profile_j) / sum[ exp(partsworth * profile_k) ] for all j.
    """
    nan_mask = pd.notnull(individual_selections)
    return pm.Categorical("Obs_%s" % individual_selections.name,
                          tt.nnet.softmax(tt.stack([
                            tt.dot(choice.values, partsworth) for _, choice in choices[nan_mask.values].groupby(axis=1, level=0)
                          ], axis=0).T),
                          observed=individual_selections[nan_mask.values].values) 
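In Example #14, tt.stack collects one utility tt.dot(choice.values, partsworth) per choice alternative into a single tensor, and tt.nnet.softmax then turns those utilities into the choice probabilities exp(partsworth * profile_j) / sum_k exp(partsworth * profile_k) described in the docstring.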
Example #15
Source File: theano_export.py    From gempy with GNU Lesser General Public License v3.0
def select_finite_faults(self):
        fault_points = T.vertical_stack(T.stack([self.ref_layer_points[0]], axis=0), self.rest_layer_points).T
        ctr = T.mean(fault_points, axis=1)
        x = fault_points - ctr.reshape((-1, 1))
        M = T.dot(x, x.T)
        U = T.nlinalg.svd(M)[2]
        rotated_x = T.dot(self.x_to_interpolate(), U)
        rotated_fault_points = T.dot(fault_points.T, U)
        rotated_ctr = T.mean(rotated_fault_points, axis=0)
        a_radius = (rotated_fault_points[:, 0].max() - rotated_fault_points[:, 0].min()) / 2 + self.inf_factor[
            self.n_surface_op[0] - 1]
        b_radius = (rotated_fault_points[:, 1].max() - rotated_fault_points[:, 1].min()) / 2 + self.inf_factor[
            self.n_surface_op[0] - 1]
        sel = T.lt((rotated_x[:, 0] - rotated_ctr[0]) ** 2 / a_radius ** 2 + (
                    rotated_x[:, 1] - rotated_ctr[1]) ** 2 / b_radius ** 2,
                   1)

        if "select_finite_faults" in self.verbose:
            sel = theano.printing.Print("scalar_field_iter")(sel)

        return sel 
Example #16
Source File: dmn_smooth.py    From Dynamic-memory-networks-in-Theano with MIT License
def new_attention_step(self, ct, prev_g, mem, q_q):
        #cWq = T.stack([T.dot(T.dot(ct, self.W_b), q_q)])
        #cWm = T.stack([T.dot(T.dot(ct, self.W_b), mem)])
        z = T.concatenate([ct, mem, q_q, ct * q_q, ct * mem, (ct - q_q) ** 2, (ct - mem) ** 2])#, cWq, cWm])
        
        l_1 = T.dot(self.W_1, z) + self.b_1
        l_1 = T.tanh(l_1)
        l_2 = T.dot(self.W_2, l_1) + self.b_2
        G = T.nnet.sigmoid(l_2)[0]
        return G 
Example #17
Source File: theano_util.py    From cpae with MIT License
def get_dropout_mask(var, drop_prob, rng=None, seed=None):
    if not rng and not seed:
        seed = config.default_seed
    if not rng:
        rng = MRG_RandomStreams(seed)
    # we assume that the batch dimension is the first one
    mask_shape = tensor.stack([var.shape[0], var.shape[-1]])
    return rng.binomial(mask_shape, p=1 - drop_prob,
                        dtype=theano.config.floatX) 
Example #18
Source File: theano_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def stack(x, axis=0):
    return T.stack(x, axis=axis) 
Example #19
Source File: dmn_basic.py    From Dynamic-memory-networks-in-Theano with MIT License
def new_attention_step(self, ct, prev_g, mem, q_q):
        cWq = T.stack([T.dot(T.dot(ct, self.W_b), q_q)])
        cWm = T.stack([T.dot(T.dot(ct, self.W_b), mem)])
        z = T.concatenate([ct, mem, q_q, ct * q_q, ct * mem, T.abs_(ct - q_q), T.abs_(ct - mem), cWq, cWm])
        
        l_1 = T.dot(self.W_1, z) + self.b_1
        l_1 = T.tanh(l_1)
        l_2 = T.dot(self.W_2, l_1) + self.b_2
        G = T.nnet.sigmoid(l_2)[0]
        return G 
Example #20
Source File: theano_backend.py    From KerasNeuralFingerprint with MIT License
def pack(x):
    return T.stack(*x) 
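Examples #11, #12, and #20 use the older calling convention T.stack(*tensors), which passes the tensors as separate positional arguments; recent Theano versions also accept a list plus an axis keyword, as in Examples #18 and #26. A quick sketch of the two forms, assuming a reasonably recent Theano (the varargs form may emit a deprecation warning):

import theano.tensor as T

a = T.vector('a')
b = T.vector('b')

s_list = T.stack([a, b], axis=0)  # list form, with an explicit axis
s_args = T.stack(a, b)            # legacy varargs form, stacks along axis 0

Both calls build equivalent graphs.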
Example #21
Source File: images2D_carrier.py    From lddmm-ot with MIT License
def _dirac_truncated_rfft(self, point) :
		"""
		Returns the truncated real FFT of a dirac at position 'point',
		as a (2+1)-d array of size "K.shape//2+1" + (4,).
		See real_fft._irfft_2d to understand the format of the output.
		The code may seem quite convoluted, but hey, it's not my fault
		if theano forces us to use real-valued FFT...
		"""
		su, di = self._phase_shifts(point)
		re_re = T.cos(di) + T.cos(su) # 2 cos(a)cos(b) = cos(a-b) + cos(a+b)
		re_im = T.sin(su) + T.sin(di) # 2 sin(a)cos(b) = sin(a+b) + sin(a-b)
		im_re = T.sin(su) - T.sin(di) # 2 cos(a)sin(b) = sin(a+b) - sin(a-b)
		im_im = T.cos(di) - T.cos(su) # 2 sin(a)sin(b) = cos(a-b) - cos(a+b)
		return .5 * T.stack([re_re, re_im, im_re, im_im], axis=2) # Don't forget the .5 ! 
Example #22
Source File: rotconv.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def rot_filters(self, theta):
        fsize = self.filter_size[0];
        ind = T.as_tensor_variable(np.indices((fsize, fsize)) - (fsize - 1.0) / 2.0);
        rotate = T.stack(T.cos(theta), -T.sin(theta), T.sin(theta), T.cos(theta)).reshape((2, 2));
        ind_rot = T.tensordot(rotate, ind, axes=((0, 0))) + (fsize - 1.0) / 2.0;
        transy = T.clip(ind_rot[0], 0, fsize - 1 - .00001);
        transx = T.clip(ind_rot[1], 0, fsize - 1 - .00001);
        vert = T.iround(transy);
        horz = T.iround(transx);
        return self.W[:, :, vert, horz]; 
Example #23
Source File: rotconv.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def rot_filters(self, theta):
        fsize = self.filter_size[0];
        ind = T.as_tensor_variable(np.indices((fsize, fsize)) - (fsize - 1.0) / 2.0);
        rotate = T.stack(T.cos(theta), -T.sin(theta), T.sin(theta), T.cos(theta)).reshape((2, 2));
        ind_rot = T.tensordot(rotate, ind, axes=((0, 0))) + (fsize - 1.0) / 2.0;
        transy = T.clip(ind_rot[0], 0, fsize - 1 - .00001);
        transx = T.clip(ind_rot[1], 0, fsize - 1 - .00001);
        vert = T.iround(transy);
        horz = T.iround(transx);
        return self.W[:, :, vert, horz]; 
Example #24
Source File: rotconv.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def rot_filters(self, theta):
        fsize = self.filter_size[0];
        ind = T.as_tensor_variable(np.indices((fsize, fsize)) - (fsize - 1.0) / 2.0);
        rotate = T.stack(T.cos(theta), -T.sin(theta), T.sin(theta), T.cos(theta)).reshape((2, 2));
        ind_rot = T.tensordot(rotate, ind, axes=((0, 0))) + (fsize - 1.0) / 2.0;
        transy = T.clip(ind_rot[0], 0, fsize - 1 - .00001);
        transx = T.clip(ind_rot[1], 0, fsize - 1 - .00001);
        vert = T.iround(transy);
        horz = T.iround(transx);
        return self.W[:, :, vert, horz]; 
Example #25
Source File: rotconv.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def rot_filters(self, theta):
        fsize = self.filter_size[0];
        ind = T.as_tensor_variable(np.indices((fsize, fsize)) - (fsize - 1.0) / 2.0);
        rotate = T.stack(T.cos(theta), -T.sin(theta), T.sin(theta), T.cos(theta)).reshape((2, 2));
        ind_rot = T.tensordot(rotate, ind, axes=((0, 0))) + (fsize - 1.0) / 2.0;
        transy = T.clip(ind_rot[0], 0, fsize - 1 - .00001);
        transx = T.clip(ind_rot[1], 0, fsize - 1 - .00001);
        vert = T.iround(transy);
        horz = T.iround(transx);
        return self.W[:, :, vert, horz]; 
Example #26
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def stack(x, axis=0):
    return T.stack(x, axis=axis) 
Example #27
Source File: layers.py    From crfrnn_layer with MIT License
def get_output_for(self, inputs, **kwargs):
        unary, ref = inputs

        N, _, H, W = ref.shape
        yx = tt.cast(tt.stack(tt.mgrid[0:H, 0:W]), "float32")
        grid = tt.alloc(yx[np.newaxis, :, :, :], N, 2, H, W)
        stacked = tt.concatenate([grid, ref], axis=1)

        def _bilateral(V, R):
            o = tt.ones((1, V.shape[1], V.shape[2]), "float32")
            norm = tt.sqrt(gaussian_filter(R, o, self.kstd_bf,
                                           self.ref_dim)) + 1e-8
            return gaussian_filter(R, V/norm, self.kstd_bf, self.ref_dim,
                                   self.val_dim) / norm

        def _step(prev_q, U, ref, normalize=True):
            qbf = _bilateral(prev_q, ref,)
            qsf = tt.nnet.conv2d(prev_q[np.newaxis, :, :, :],
                                 self.W_spatial, border_mode="half")[0]

            q_hat = -self.compat_bf * qbf + -self.compat_spatial * qsf
            q_hat = U - q_hat

            return softmax(q_hat, axis=0) if normalize else q_hat

        def _inference(unary_i, ref_i):
            U = tt.log(tt.clip(unary_i, 1e-5, 1))
            prev_q = softmax(U, axis=0)

            # This is faster than using scan.
            for i in range(self.num_iter):
                normalize = self.normalize_final_iter or i < self.num_iter-1
                prev_q = _step(prev_q, U, ref_i, normalize)
            return prev_q

        return theano.scan(fn=_inference, sequences=[unary, stacked],
                           outputs_info=None)[0] 
Example #28
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def stack(x, axis=0):
    return T.stack(x, axis=axis) 
Example #29
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def stack(x, axis=0):
    return T.stack(x, axis=axis) 
Example #30
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def stack(x, axis=0):
    return T.stack(x, axis=axis)