Python theano.tensor.eq() Examples

The following are 23 code examples of theano.tensor.eq(). Each example notes the original project and source file it comes from. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
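As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) showing that theano.tensor.eq() compares its arguments elementwise and returns a 0/1 tensor of dtype int8:

import numpy as np
import theano
import theano.tensor as T

x = T.ivector('x')
y = T.ivector('y')
f = theano.function([x, y], T.eq(x, y))

print(f(np.array([1, 2, 3], dtype=np.int32),
        np.array([1, 0, 3], dtype=np.int32)))   # -> [1 0 1]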
Example #1
Source File: ctc_cost.py    From CTC-Connectionist-Temporal-Classification with Apache License 2.0
def recurrence_relation(y, y_mask, blank_symbol):
        n_y = y.shape[0]
        blanks = tensor.zeros((2, y.shape[1])) + blank_symbol
        ybb = tensor.concatenate((y, blanks), axis=0).T
        # a skip transition over a blank is allowed only where the labels on
        # either side of the blank differ
        sec_diag = (tensor.neq(ybb[:, :-2], ybb[:, 2:]) *
                    tensor.eq(ybb[:, 1:-1], blank_symbol) *
                    y_mask.T)

        # r1: LxL
        # r2: LxL
        # r3: LxLxB
        r2 = tensor.eye(n_y, k=1)
        r3 = (tensor.eye(n_y, k=2).dimshuffle(0, 1, 'x') *
              sec_diag.dimshuffle(1, 'x', 0))

        return r2, r3 
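A minimal smoke test for the helper above, assuming the module's tensor alias and the (length, batch) layout used by the CTC cost code; the toy labels are hypothetical:

import numpy as np
import theano
from theano import tensor

y = tensor.imatrix('y')            # (L, B) padded label sequences
y_mask = tensor.fmatrix('y_mask')  # (L, B) mask, 1. where labels are real
r2, r3 = recurrence_relation(y, y_mask, blank_symbol=0)
f = theano.function([y, y_mask], [r2, r3])

labels = np.array([[0], [1], [0], [2], [0]], dtype=np.int32)  # blank-interleaved "1 2"
mask = np.ones((5, 1), dtype=np.float32)
r2_val, r3_val = f(labels, mask)   # r2: (5, 5), r3: (5, 5, 1)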
Example #2
Source File: metrics.py    From ntm-one-shot with MIT License
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], \
        nb_classes=5, nb_samples_per_class=10, batch_size=1):
    accuracy_0 = theano.shared(np.zeros((batch_size, nb_samples_per_class), \
        dtype=theano.config.floatX))
    indices_0 = theano.shared(np.zeros((batch_size, nb_classes), \
        dtype=np.int32))
    batch_range = T.arange(batch_size)
    def step_(p, t, acc, idx):
        acc = T.inc_subtensor(acc[batch_range, idx[batch_range, t]], T.eq(p, t))
        idx = T.inc_subtensor(idx[batch_range, t], 1)
        return (acc, idx)
    (raw_accuracy, _), _ = theano.foldl(step_, sequences=[predictions.dimshuffle(1, 0), \
        targets.dimshuffle(1, 0)], outputs_info=[accuracy_0, indices_0])
    accuracy = T.mean(raw_accuracy / nb_classes, axis=0)

    return accuracy 
Example #3
Source File: cov.py    From kusanagi with MIT License
def Noise(hyp, X1, X2=None, all_pairs=True):
    ''' Noise kernel. Conceptually computes Kij = sn2 if Dij == 0 else 0,
    where D is the matrix of pairwise distances between X1 and X2; the
    branches below produce the same result without forming D explicitly.'''
    if X2 is None:
        X2 = X1

    sn2 = hyp**2
    if all_pairs and X1 is X2:
        # D = (X1[:,None,:] - X2[None,:,:]).sum(2)
        K = tt.eye(X1.shape[0])*sn2
        return K
    else:
        # D = (X1 - X2).sum(1)
        if X1 is X2:
            K = tt.ones((X1.shape[0],))*sn2
        else:
            K = 0
        return K

    # K = tt.eq(D,0)*sn2
    # return K 
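A hedged usage sketch for the kernel above, assuming tt is the module's theano.tensor alias; the sample inputs are made up:

import numpy as np
import theano
import theano.tensor as tt

hyp = tt.scalar('sn')      # noise standard deviation
X = tt.matrix('X')
f = theano.function([hyp, X], Noise(hyp, X))

X_val = np.random.randn(3, 2).astype(theano.config.floatX)
print(f(0.5, X_val))       # 0.25 * identity, shape (3, 3)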
Example #4
Source File: noise.py    From OpenDeep with Apache License 2.0
def salt_and_pepper(input, noise_level=0.2, mrg=None):
    """
    This applies salt and pepper noise to the input tensor - randomly setting bits to 1 or 0.

    Parameters
    ----------
    input : tensor
        The tensor to apply salt and pepper noise to.
    noise_level : float
        The amount of salt and pepper noise to add.
    mrg : random
        Random number generator with .binomial method.

    Returns
    -------
    tensor
        Tensor with salt and pepper noise applied.
    """
    if mrg is None:
        mrg = theano_random
    # salt and pepper noise
    a = mrg.binomial(size=input.shape, n=1, p=(1 - noise_level), dtype=theano.config.floatX)
    b = mrg.binomial(size=input.shape, n=1, p=0.5, dtype=theano.config.floatX)
    c = T.eq(a, 0) * b
    return input * a + c 
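A usage sketch; the MRG_RandomStreams generator here is an assumption standing in for the module's default theano_random:

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

mrg = MRG_RandomStreams(seed=42)
x = T.matrix('x')
corrupt = theano.function([x], salt_and_pepper(x, noise_level=0.2, mrg=mrg))

img = np.random.uniform(size=(4, 4)).astype(theano.config.floatX)
print(corrupt(img))   # roughly 20% of entries forced to 0. or 1.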
Example #5
Source File: layers.py    From gated_word_char_rlm with BSD 3-Clause "New" or "Revised" License
def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a gate layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string, layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """      
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')]) + tparams[p_name(prefix, 'b')][0])
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),  
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)   
    return eval(activ)(X) 
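A stripped-down sketch of the pretrain_mode switch used above, with scalars standing in for the word/char tensors (all names here are illustrative). Note that ifelse is lazy, so only the selected branch is computed:

import numpy
import theano
from theano import tensor
from theano.ifelse import ifelse

pretrain_mode = theano.shared(numpy.float32(0.))
x_word, x_char, x_mix = tensor.scalars('x_word', 'x_char', 'x_mix')
out = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
             ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), x_word, x_char),
             x_mix)
f = theano.function([x_word, x_char, x_mix], out)

print(f(1., 2., 3.))                        # 1.0 -> word only
pretrain_mode.set_value(numpy.float32(2.))
print(f(1., 2., 3.))                        # 3.0 -> word & char branch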
Example #6
Source File: layers.py    From gated_word_char_rlm with BSD 3-Clause "New" or "Revised" License
def concat_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a concat layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string,  layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               tensor.dot(tensor.concatenate([X_word, X_char], axis=2), tparams[p_name(prefix, 'W')]) + tparams[p_name(prefix, 'b')]) 
    return eval(activ)(X) 
Example #7
Source File: DropoutHiddenLayer.py    From SurvivalNet with Apache License 2.0
def __init__(self, rng, input, n_in, n_out, is_train,
                 activation, dropout_rate, mask=None, W=None, b=None):
        super(DropoutHiddenLayer, self).__init__(
                rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
                activation=activation)

        self.dropout_rate = dropout_rate
        self.srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
        self.mask = mask
        self.layer = self.output

        # Computes outputs for train and test phase applying dropout when needed.
        train_output = self.layer * T.cast(self.mask, theano.config.floatX)
        test_output = self.output * (1 - dropout_rate)
        self.output = ifelse(T.eq(is_train, 1), train_output, test_output)
        return 
Example #8
Source File: theanobigramoptimizer.py    From theanolm with Apache License 2.0
def _create_class_size_function(self):
        """Creates a function that calculates the number of words in a class.

        :type class_id: int
        :param class_id: ID of a class

        :rtype: int
        :returns: number of words in the class
        """

        class_id = tensor.scalar('class_id', dtype=self._count_type)
        class_id.tag.test_value = 0

        result = tensor.eq(self._word_to_class, class_id).sum()

        self._class_size = theano.function(
            [class_id],
            result,
            name='class_size') 
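The counting idiom above in isolation, as a minimal sketch with a hypothetical word_to_class shared variable in place of self._word_to_class:

import numpy as np
import theano
from theano import tensor

word_to_class = theano.shared(np.array([0, 2, 1, 2, 2], dtype='int32'))
class_id = tensor.scalar('class_id', dtype='int32')
class_size = theano.function([class_id],
                             tensor.eq(word_to_class, class_id).sum(),
                             name='class_size')

print(class_size(2))   # -> 3 words belong to class 2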
Example #9
Source File: DropoutHiddenLayer.py    From Deep-Neural-Networks-HealthCare with MIT License
def __init__(self, rng, input, n_in, n_out, is_train,
                 activation, dropout_rate, mask=None, W=None, b=None):
        super(DropoutHiddenLayer, self).__init__(
                rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
                activation=activation)

        self.dropout_rate = dropout_rate
        self.srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
        self.mask = mask
        self.layer = self.output

        # Computes outputs for train and test phase applying dropout when needed.
        train_output = self.layer * T.cast(self.mask, theano.config.floatX)
        test_output = self.output * (1 - dropout_rate)
        self.output = ifelse(T.eq(is_train, 1), train_output, test_output)
        return 
Example #10
Source File: servoing_policy.py    From visual_dynamics with MIT License
def _get_jac_vars(self):
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        return jac_vars 
Example #11
Source File: servoing_policy.py    From visual_dynamics with MIT License
def _get_jac_z_vars(self):
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
        y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]

        z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
                  for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
        return jac_vars, z_vars 
Example #12
Source File: emb_cos_autoenc_cos_en2it.py    From clweadv with GNU Lesser General Public License v3.0
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
		self.embedding_dim = embedding_dim
		self.num_hidden_layers = num_hidden_layers
		self.hidden_dim = hidden_dim
		self.in_dropout_p = in_dropout_p
		self.hidden_dropout_p = hidden_dropout_p
	
		print >> sys.stderr, 'Building computation graph for discriminator...'		
		self.input_var = T.matrix('input')
		self.target_var = T.matrix('target')

		self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim), input_var=T.tanh(self.input_var), name='l_in')
		self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
		self.layers = [self.l_in, self.l_in_dr]
		for i in xrange(self.num_hidden_layers):
			l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
			l_hid_dr = lasagne.layers.DropoutLayer(l_hid, self.hidden_dropout_p)
			self.layers.append(l_hid)
			self.layers.append(l_hid_dr)
		self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=1, nonlinearity=None, name='l_preout'))
		self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')

		self.prediction = lasagne.layers.get_output(self.l_out)
		self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
		self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()

		self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
		self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)

		print >> sys.stderr, 'Compiling discriminator...'
		self.train_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy], updates=self.updates)
		self.eval_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy])

#discriminator_0 = Discriminator(d, DISCR_NUM_HIDDEN_LAYERS, DISCR_NUM_HIDDEN_LAYERS)
#discriminator_1 = Discriminator(d, DISCR_NUM_HIDDEN_LAYERS, DISCR_NUM_HIDDEN_LAYERS) 
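The accuracy expression above chains two comparisons; here it is in isolation as a hedged sketch with made-up inputs:

import numpy as np
import theano
import theano.tensor as T

pred = T.matrix('pred')      # sigmoid outputs in [0, 1]
target = T.matrix('target')  # 0./1. labels
acc = T.eq(T.ge(pred, 0.5), target).mean()
f = theano.function([pred, target], acc)

p = np.array([[0.9], [0.3], [0.6]], dtype=theano.config.floatX)
t = np.array([[1.], [0.], [0.]], dtype=theano.config.floatX)
print(f(p, t))   # 2 of 3 thresholded predictions match -> ~0.667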
Example #13
Source File: classifier_agent.py    From TextDetector with GNU General Public License v3.0
def get_decide_func(self):
        """
        Returns a theano function that takes a minibatch
        (num_examples, num_features) of contexts and returns
        a minibatch (num_examples, num_classes) of one-hot codes
        for actions.
        """

        X = T.matrix()
        y_hat = self.mlp.fprop(X)

        theano_rng = make_theano_rng(None, 2013+11+20, which_method="multinomial")
        if self.stochastic:
            a = theano_rng.multinomial(pvals=y_hat, dtype='float32')
        else:
            mx = T.max(y_hat, axis=1).dimshuffle(0, 'x')
            a = T.eq(y_hat, mx)

        if self.epsilon is not None:
            a = theano_rng.multinomial(pvals=(1. - self.epsilon) * a +
                                       self.epsilon * T.ones_like(y_hat) / y_hat.shape[1],
                                       dtype='float32')

        if self.epsilon_stochastic is not None:
            a = theano_rng.multinomial(pvals=(1. - self.epsilon_stochastic) * a +
                                       self.epsilon_stochastic * y_hat,
                                       dtype='float32')

        logger.info("Compiling classifier agent learning function")
        t1 = time.time()
        f = function([X], a)
        t2 = time.time()

        logger.info("...done, took {0}".format(t2 - t1))

        return f 
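The deterministic branch above turns a row-wise maximum into a one-hot action vector. A standalone sketch of that trick (note that ties would produce more than one 1 per row, which the multinomial branches avoid):

import numpy as np
import theano
import theano.tensor as T

y_hat = T.matrix('y_hat')
mx = T.max(y_hat, axis=1).dimshuffle(0, 'x')
f = theano.function([y_hat], T.eq(y_hat, mx))

probs = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.2, 0.3]], dtype=theano.config.floatX)
print(f(probs))   # [[0 1 0], [1 0 0]]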
Example #14
Source File: emb_adversarial_cos_autoenc_cos_en2it.py    From clweadv with GNU Lesser General Public License v3.0
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
		self.embedding_dim = embedding_dim
		self.num_hidden_layers = num_hidden_layers
		self.hidden_dim = hidden_dim
		self.in_dropout_p = in_dropout_p
		self.hidden_dropout_p = hidden_dropout_p
	
		print >> sys.stderr, 'Building computation graph for discriminator...'		
		self.input_var = T.matrix('input')
		self.target_var = T.matrix('target')

		self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim), input_var=T.tanh(self.input_var), name='l_in')
		self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
		self.layers = [self.l_in, self.l_in_dr]
		for i in xrange(self.num_hidden_layers):
			l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
			l_hid_dr = lasagne.layers.DropoutLayer(l_hid, self.hidden_dropout_p)
			self.layers.append(l_hid)
			self.layers.append(l_hid_dr)
		self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=1, nonlinearity=None, name='l_preout'))
		self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')

		self.prediction = lasagne.layers.get_output(self.l_out)
		self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
		self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()

		self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
		self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)

		print >> sys.stderr, 'Compiling discriminator...'
		self.train_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy], updates=self.updates)
		self.eval_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy]) 
Example #15
Source File: emb_adversarial_cosfeats_autoenc_cos_en2it.py    From clweadv with GNU Lesser General Public License v3.0
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
		self.embedding_dim = embedding_dim
		self.num_hidden_layers = num_hidden_layers
		self.hidden_dim = hidden_dim
		self.in_dropout_p = in_dropout_p
		self.hidden_dropout_p = hidden_dropout_p
	
		print >> sys.stderr, 'Building computation graph for discriminator...'		
		self.input_var = T.matrix('input')
		self.input_var_extra = T.matrix('input_extra')
		self.target_var = T.matrix('target')

		self.cos_feats = cosine_sim(self.input_var, T.repeat(self.input_var_extra, 2, axis=0)).reshape((-1, 1))
		self.total_input = T.concatenate([self.input_var, self.cos_feats], axis=1)

		self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim+1), input_var=self.total_input, name='l_in')
		self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
		self.layers = [self.l_in, self.l_in_dr]
		for i in xrange(self.num_hidden_layers):
			l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
			l_hid_dr = lasagne.layers.DropoutLayer(l_hid, self.hidden_dropout_p)
			self.layers.append(l_hid)
			self.layers.append(l_hid_dr)
		self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=1, nonlinearity=None, name='l_preout'))
		self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')

		self.prediction = lasagne.layers.get_output(self.l_out)
		self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
		self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()

		self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
		self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)

		print >> sys.stderr, 'Compiling discriminator...'
		self.train_fn = theano.function([self.input_var, self.input_var_extra, self.target_var], [self.loss, self.accuracy], updates=self.updates)
		self.eval_fn = theano.function([self.input_var, self.input_var_extra, self.target_var], [self.loss, self.accuracy]) 
Example #16
Source File: linesearch.py    From TextDetector with GNU General Public License v3.0
def my_not(arg):
    """
    Elementwise logical NOT: returns 1 where ``arg`` equals the module-level
    constant ``zero``, and 0 elsewhere.
    """
    return TT.eq(arg, zero)
Example #17
Source File: channel_out.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def activation(self, network, in_vw):
        # NOTE: mostly copied from FeaturePoolNode
        k = network.find_hyperparameter(["num_pieces"])
        axis = network.find_hyperparameter(
            ["feature_pool_axis",
             "axis"],
            # by default, the first non-batch axis
            treeano.utils.nth_non_batch_axis(network, 0))

        # shape calculation
        in_shape = in_vw.shape
        in_features = in_shape[axis]
        assert (in_features % k) == 0
        out_shape = list(in_shape)
        out_shape[axis] = in_shape[axis] // k
        out_shape = tuple(out_shape)

        # calculate indices of maximum activation
        in_var = in_vw.variable
        symbolic_shape = in_vw.symbolic_shape()
        new_symbolic_shape = (symbolic_shape[:axis]
                              + (out_shape[axis], k) +
                              symbolic_shape[axis + 1:])
        reshaped = in_var.reshape(new_symbolic_shape)
        if True:
            # this implementation seems to be slightly faster
            maxed = T.max(reshaped, axis=axis + 1, keepdims=True)

            mask = T.eq(maxed, reshaped).reshape(symbolic_shape)
        else:
            max_idxs = T.argmax(reshaped, axis=axis + 1, keepdims=True)

            # calculate indices of each unit
            arange_pattern = ["x"] * (in_vw.ndim + 1)
            arange_pattern[axis + 1] = 0
            idxs = T.arange(k).dimshuffle(tuple(arange_pattern))

            mask = T.eq(max_idxs, idxs).reshape(symbolic_shape)
        return in_vw.variable * mask 
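A compact numeric sketch of the masking trick above, for a 2-D input with k = 2 pieces along axis 1 (hypothetical shapes, no treeano machinery):

import numpy as np
import theano
import theano.tensor as T

k = 2
x = T.matrix('x')                              # (batch, features)
reshaped = x.reshape((x.shape[0], x.shape[1] // k, k))
maxed = T.max(reshaped, axis=2, keepdims=True)
mask = T.eq(maxed, reshaped).reshape((x.shape[0], x.shape[1]))  # 1 at each piece's maximum
f = theano.function([x], x * mask)

inp = np.array([[1., 3., 2., 0.]], dtype=theano.config.floatX)
print(f(inp))   # [[0. 3. 2. 0.]], non-max units zeroed within each pair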
Example #18
Source File: FixedEmbedding.py    From deeplearning4nlp-tutorial with Apache License 2.0
def get_output_mask(self, train=None):
        X = self.get_input(train)
        if not self.mask_zero:
            return None
        else:
            return T.ones_like(X) * (1 - T.eq(X, 0)) 
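A standalone sketch of the zero-padding mask computed above (inputs are hypothetical token-index batches):

import numpy as np
import theano
import theano.tensor as T

X = T.imatrix('X')                        # (batch, timesteps) token indices
mask = T.ones_like(X) * (1 - T.eq(X, 0))  # 0 wherever padding index 0 appears
f = theano.function([X], mask)

tokens = np.array([[4, 7, 0, 0],
                   [9, 0, 0, 0]], dtype=np.int32)
print(f(tokens))   # [[1 1 0 0], [1 0 0 0]]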
Example #19
Source File: linesearch.py    From TextDetector with GNU General Public License v3.0
def lazy_and(name='node', *args):
    """
    Short-circuiting conjunction: returns the module-level constant ``false``
    as soon as an argument equals ``zero``, chaining lazy ``ifelse`` nodes so
    that later arguments are only evaluated when needed.
    """
    def apply_me(args):
        if len(args) == 1:
            return args[0]
        else:
            rval = ifelse(TT.eq(args[0], zero), false, apply_me(args[1:]),
                          name=name + str(len(args)))
            return rval
    return apply_me(args)
Example #20
Source File: emb_lin_adversarial_cos_autoenc_cos_en2it.py    From clweadv with GNU Lesser General Public License v3.0
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
		self.embedding_dim = embedding_dim
		self.num_hidden_layers = num_hidden_layers
		self.hidden_dim = hidden_dim
		self.in_dropout_p = in_dropout_p
		self.hidden_dropout_p = hidden_dropout_p
	
		print >> sys.stderr, 'Building computation graph for discriminator...'		
		self.input_var = T.matrix('input')
		self.target_var = T.matrix('target')

		self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim), input_var=T.tanh(self.input_var), name='l_in')
		self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
		self.layers = [self.l_in, self.l_in_dr]
		for i in xrange(self.num_hidden_layers):
			l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
			l_hid_dr = lasagne.layers.DropoutLayer(l_hid, self.hidden_dropout_p)
			self.layers.append(l_hid)
			self.layers.append(l_hid_dr)
		self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=1, nonlinearity=None, name='l_preout'))
		self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')

		self.prediction = lasagne.layers.get_output(self.l_out)
		self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
		self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()

		self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
		self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)

		print >> sys.stderr, 'Compiling discriminator...'
		self.train_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy], updates=self.updates)
		self.eval_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy]) 
Example #21
Source File: emb_multidr_adversarial_resnet_cos_autoenc_cos_en2it.py    From clweadv with GNU Lesser General Public License v3.0
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, hidden2out_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
		self.embedding_dim = embedding_dim
		self.num_hidden_layers = num_hidden_layers
		self.hidden_dim = hidden_dim
		self.in_dropout_p = in_dropout_p
		self.hidden_dropout_p = hidden_dropout_p
		self.hidden2out_dropout_p = hidden2out_dropout_p
		self.update_hyperparameters = update_hyperparams
	
		print >> sys.stderr, 'Building computation graph for discriminator...'		
		self.input_var = T.matrix('input')
		self.target_var = T.matrix('target')

		self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim), input_var=T.tanh(self.input_var), name='l_in')
		self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
		self.l_prehid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.l_in_dr, num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name='l_prehid'))
		self.layers = [self.l_in, self.l_in_dr, self.l_prehid]
		for i in xrange(self.num_hidden_layers):
			l_hid_predr = lasagne.layers.DropoutLayer(self.layers[-1], self.hidden_dropout_p)
			l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(l_hid_predr, num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
			l_hid_sum = lasagne.layers.ElemwiseSumLayer([self.layers[-1], l_hid])
			self.layers.append(l_hid_predr)
			self.layers.append(l_hid)
			self.layers.append(l_hid_sum)

		self.l_preout_predr = lasagne.layers.DropoutLayer(self.layers[-1], self.hidden2out_dropout_p)
		self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.l_preout_predr, num_units=1, nonlinearity=None, name='l_preout'))
		self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')

		self.prediction = lasagne.layers.get_output(self.l_out)
		self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
		self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()

		self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
		self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)

		print >> sys.stderr, 'Compiling discriminator...'
		self.train_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy], updates=self.updates)
		self.eval_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy]) 
Example #22
Source File: emb_adversarial_autoenc_en2it.py    From clweadv with GNU Lesser General Public License v3.0
def __init__(self, embedding_dim=100, num_hidden_layers=2, hidden_dim=200, in_dropout_p=0.2, hidden_dropout_p=0.5, update_hyperparams={'learning_rate': 0.01}):
		self.embedding_dim = embedding_dim
		self.num_hidden_layers = num_hidden_layers
		self.hidden_dim = hidden_dim
		self.in_dropout_p = in_dropout_p
		self.hidden_dropout_p = hidden_dropout_p
	
		print >> sys.stderr, 'Building computation graph for discriminator...'		
		self.input_var = T.matrix('input')
		self.target_var = T.matrix('target')

		self.l_in = lasagne.layers.InputLayer(shape=(None, self.embedding_dim), input_var=T.tanh(self.input_var), name='l_in')
		self.l_in_dr = lasagne.layers.DropoutLayer(self.l_in, self.in_dropout_p)
		self.layers = [self.l_in, self.l_in_dr]
		for i in xrange(self.num_hidden_layers):
			l_hid = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=self.hidden_dim, nonlinearity=lasagne.nonlinearities.leaky_rectify, W=lasagne.init.GlorotUniform(gain=leaky_relu_gain), name=('l_hid_%s' % i)))
			l_hid_dr = lasagne.layers.DropoutLayer(l_hid, self.hidden_dropout_p)
			self.layers.append(l_hid)
			self.layers.append(l_hid_dr)
		self.l_preout = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(self.layers[-1], num_units=1, nonlinearity=None, name='l_preout'))
		self.l_out = lasagne.layers.NonlinearityLayer(self.l_preout, nonlinearity=lasagne.nonlinearities.sigmoid, name='l_out')

		self.prediction = lasagne.layers.get_output(self.l_out)
		self.loss = lasagne.objectives.binary_crossentropy(self.prediction, self.target_var).mean()
		self.accuracy = T.eq(T.ge(self.prediction, 0.5), self.target_var).mean()

		self.params = lasagne.layers.get_all_params(self.l_out, trainable=True)
		self.updates = lasagne.updates.adam(self.loss, self.params, **update_hyperparams)

		print >> sys.stderr, 'Compiling discriminator...'
		self.train_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy], updates=self.updates)
		self.eval_fn = theano.function([self.input_var, self.target_var], [self.loss, self.accuracy]) 
Example #23
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def equal(x, y):
    return T.eq(x, y) 