Python lasagne.layers.get_output() Examples

The following are 30 code examples of lasagne.layers.get_output(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the lasagne.layers module, or try the search function.
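Before turning to the project code, here is a minimal, self-contained sketch of the typical get_output() workflow; the network, shapes, and variable names are illustrative only and do not come from any of the projects below:

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, DenseLayer, get_output

# Build a tiny network: a 100-dimensional input feeding a 10-way softmax
X_var = T.matrix('X')
l_in = InputLayer(shape=(None, 100), input_var=X_var)
l_out = DenseLayer(l_in, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)

# get_output() returns a symbolic Theano expression for the layer's output;
# deterministic=True disables stochastic behavior such as dropout at test time
train_output = get_output(l_out)
test_output = get_output(l_out, deterministic=True)

# Compile and evaluate the deterministic version
predict_fn = theano.function([X_var], test_output)
probs = predict_fn(np.random.rand(5, 100).astype(theano.config.floatX))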
Example #1
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_conv2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = L.Conv2DLayer(l_x, num_filters, filter_size=filter_size, stride=1, pad='same',
                           flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = conv2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-6) 
Example #2
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def test_function(net, hasTargets=True, layer_index=-1):    

    # We need the prediction function to calculate the validation accuracy;
    # this way we can test the net during and after training.
    # We need one version with targets and one without.
    prediction = l.get_output(l.get_all_layers(net)[layer_index], deterministic=True)

    log.i("COMPILING TEST FUNCTION...", new_line=False)
    start = time.time()
    if hasTargets:
        # Theano variable for the class targets
        targets = T.matrix('targets', dtype=theano.config.floatX)
        
        loss = loss_function(net, prediction, targets)
        accuracy = accuracy_function(net, prediction, targets)
        
        test_net = theano.function([l.get_all_layers(net)[0].input_var, targets], [prediction, loss, accuracy], allow_input_downcast=True)

    else:
        test_net = theano.function([l.get_all_layers(net)[0].input_var], prediction, allow_input_downcast=True)
        
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return test_net 
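A hypothetical call of the compiled function with targets, where X_batch is an input batch and y_batch the matching one-hot target matrix (both names are illustrative, not from the repository):

# returns raw predictions plus the scalar loss and accuracy for the batch
prediction, loss, accuracy = test_net(X_batch, y_batch)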
Example #3
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def train_function(net):

    # We use dynamic learning rates which change after some epochs
    lr_dynamic = T.scalar(name='learning_rate')

    # Theano variable for the class targets
    targets = T.matrix('targets', dtype=theano.config.floatX)

    # Get the network output
    prediction = l.get_output(net)
    
    # The Theano train function takes images and class targets as input
    log.i("COMPILING TRAIN FUNCTION...", new_line=False)
    start = time.time()
    loss = loss_function(net, prediction, targets)
    updates = net_updates(net, loss, lr_dynamic)
    train_net = theano.function([l.get_all_layers(net)[0].input_var, targets, lr_dynamic], loss, updates=updates, allow_input_downcast=True)
    log.i(("DONE! (", int(time.time() - start), "s )"))

    return train_net

################# PREDICTION FUNCTION #################### 
Example #4
Source File: init_policy.py    From pixelworld with MIT License
def dist_info_sym(self, obs_var, state_info_vars):
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))
        if self._state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = TT.concatenate(
                [obs_var, prev_action_var],
                axis=2
            )
        else:
            all_input_var = obs_var
        return dict(
            prob=L.get_output(
                self._prob_network.output_layer,
                {self._prob_network.input_layer: all_input_var}
            )
        ) 
Example #5
Source File: neuralforestlayer.py    From ShallowNeuralDecisionForest with MIT License
def __init__(self, incoming, depth, n_estimators, n_outputs, pi_iters, **kwargs):
        self._incoming = incoming
        self._depth = depth
        self._n_estimators = n_estimators
        self._n_outputs = n_outputs
        self._pi_iters = pi_iters
        super(NeuralForestLayer, self).__init__(incoming, **kwargs)

        pi_init = Constant(val=1.0 / n_outputs)(((1 << (depth - 1)) * n_estimators, n_outputs))
        pi_name = "%s.%s" % (self.name, 'pi') if self.name is not None else 'pi'
        self.pi = theano.shared(pi_init, name=pi_name)

        # what we want to do here is pi / pi.sum(axis=1)
        # to be safe, if certain rows only contain zeroes (for some pi all y's became 0),
        #     replace such row with 1/n_outputs
        sum_pi_over_y = self.pi.sum(axis=1).dimshuffle(0, 'x')
        all_0_y = T.eq(sum_pi_over_y, 0)
        norm_pi_body = (self.pi + all_0_y * (1.0 / n_outputs)) / (sum_pi_over_y + all_0_y)
        self.normalize_pi = theano.function([], [], updates=[(self.pi, norm_pi_body)])
        self.update_pi_one_iter = self.get_update_pi_one_iter_func()

        self.normalize_pi()

        t_input = T.matrix('t_input')
        self.f_leaf_proba = theano.function([t_input], self.get_probabilities_for(get_output(incoming, t_input))) 
Example #6
Source File: conv_sup_cc_lbp.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network, encode_layer, input_var, aug_var, fea_var, target_var):
    prediction = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0005, momentum=0.975)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    test_output = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_crossentropy(test_output, target_var).mean()

    val_fn = theano.function([input_var, aug_var, fea_var, target_var], [test_loss, encode, test_output])
    train_fn = theano.function([input_var, aug_var, fea_var, target_var], loss, updates=updates)

    return train_fn, val_fn
Example #7
Source File: conv_sup_cc.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network, encode_layer, input_var, aug_var, target_var):
    prediction = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0005, momentum=0.975)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    test_output = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_crossentropy(test_output, target_var).mean()

    val_fn = theano.function([input_var, aug_var, target_var], [test_loss, encode, test_output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)

    return train_fn, val_fn
Example #8
Source File: net_theano.py    From visual_dynamics with MIT License
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
    x_shape, u_shape = input_shapes
    X_var = X_var or T.tensor4('X')
    U_var = U_var or T.matrix('U')
    X_diff_var = X_diff_var or T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)

    l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
    l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
    l_y = L.flatten(l_x)
    l_y_diff_pred = L.flatten(l_x_diff_pred)

    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred), ('y', l_y), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Example #9
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_channelwise_locally_connected2d(x_shape, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.LocallyConnected2DLayer(l_x, x_shape[0], filter_size=filter_size, channelwise=True,
                                        stride=1, pad='same', flip_filters=flip_filters,
                                        untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = channelwise_locally_connected2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-7) 
Example #10
Source File: benchmark_imagenet.py    From convnet-benchmarks with MIT License
def main():
    batch_size = args.batch_size
    print('Building model...')
    layer, input_var = build_model(batch_size=batch_size)
    labels_var = T.ivector('labels')
    output = get_output(layer)
    loss = T.nnet.categorical_crossentropy(
        T.nnet.softmax(output), labels_var).mean(
        dtype=theano.config.floatX)
    gradient = T.grad(loss, get_all_params(layer))

    print('Compiling theano functions...')
    forward_func = theano.function([input_var], output)
    full_func = theano.function([input_var, labels_var], gradient)
    print('Functions are compiled')

    images = np.random.rand(batch_size, 3, image_sz, image_sz).astype(np.float32)
    labels = np.random.randint(0, 1000, size=batch_size).astype(np.int32)

    time_theano_run(forward_func, [images], 'Forward')
    time_theano_run(full_func, [images, labels], 'Forward-Backward') 
Example #11
Source File: test_layers.py    From ntm-lasagne with MIT License
def test_batch_size():
    input_var01, input_var16 = T.tensor3s('input01', 'input16')
    l_output01 = model(input_var01, batch_size=1)
    l_output16 = model(input_var16, batch_size=16)

    # Share the parameters for both models
    params01 = get_all_param_values(l_output01)
    set_all_param_values(l_output16, params01)

    posterior_fn01 = theano.function([input_var01], get_output(l_output01))
    posterior_fn16 = theano.function([input_var16], get_output(l_output16))

    example_input = np.random.rand(16, 30, 8)
    example_output16 = posterior_fn16(example_input)
    example_output01 = np.zeros_like(example_output16)

    for i in range(16):
        example_output01[i] = posterior_fn01(example_input[i][np.newaxis, :, :])

    assert example_output16.shape == (16, 30, 8)
    assert np.allclose(example_output16, example_output01, atol=1e-3) 
Example #12
Source File: servoing_policy.py    From visual_dynamics with MIT License
def _get_jac_z_vars(self):
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
        y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]

        z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
                  for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]
        return jac_vars, z_vars 
Example #13
Source File: servoing_policy.py    From visual_dynamics with MIT License
def _get_jac_vars(self):
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        return jac_vars 
Example #14
Source File: snn_mlp_policy.py    From snn4hrl with MIT License
def dist_info_sym(self, obs_var, latent_var=None):  # this is meant to be for one path!
        # currently this does nothing, and it doesn't work for computing the dist_info_vars of npo_snn_rewardMI
        if latent_var is None:
            latent_var1 = theano.shared(np.expand_dims(self.latent_fix, axis=0))  # new fix to avoid putting the latent as an input: just take the one fixed!
            latent_var = TT.tile(latent_var1, [obs_var.shape[0], 1])

        # generate the generalized input (append latents to obs.)
        if self.bilinear_integration:
            extended_obs_var = TT.concatenate([obs_var, latent_var,
                                               TT.flatten(obs_var[:, :, np.newaxis] * latent_var[:, np.newaxis, :],
                                                          outdim=2)]
                                              , axis=1)
        else:
            extended_obs_var = TT.concatenate([obs_var, latent_var], axis=1)
        mean_var, log_std_var = L.get_output([self._l_mean, self._l_log_std], extended_obs_var)
        if self.min_std is not None:
            log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
        return dict(mean=mean_var, log_std=log_std_var) 
Example #15
Source File: theano_funcs.py    From adversarial-autoencoder with MIT License
def create_decoder_func(layers):
    Z = T.fmatrix('Z')
    Z_batch = T.fmatrix('Z_batch')

    X = get_output(
        layers['l_decoder_out'],
        inputs={
            layers['l_encoder_out']: Z
        },
        deterministic=True
    )

    decoder_func = theano.function(
        inputs=[theano.In(Z_batch)],
        outputs=X,
        givens={
            Z: Z_batch,
        },
    )

    return decoder_func


# forward/backward (optional) pass for the encoder/decoder pair 
Example #16
Source File: theano_funcs.py    From adversarial-autoencoder with MIT License
def create_encoder_func(layers):
    X = T.fmatrix('X')
    X_batch = T.fmatrix('X_batch')

    Z = get_output(layers['l_encoder_out'], X, deterministic=True)

    encoder_func = theano.function(
        inputs=[theano.In(X_batch)],
        outputs=Z,
        givens={
            X: X_batch,
        },
    )

    return encoder_func


# forward pass for the decoder, p(x|z) 
Example #17
Source File: WordDropout.py    From neural-dep-srl with Apache License 2.0
def __init__(self, incoming, w_freq, alpha, shared_axes=(),
                 **kwargs):
        super(WordDropoutLayer, self).__init__(incoming, **kwargs)
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.w_freq = w_freq
        self.alpha = alpha
        # self.retain = lo(alpha)/(lo(p)+lo(alpha))
        self.retain = T.constant(1.)-(T.constant(alpha) / (lo(w_freq) + T.constant(alpha)))
        self.shared_axes = tuple(shared_axes) 
Example #18
Source File: conv_sup_cc.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network, encode_layer, input_var, aug_var, target_var):
    prediction = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0005, momentum=0.975)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    test_output = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_crossentropy(test_output, target_var).mean()

    val_fn = theano.function([input_var, aug_var, target_var], [test_loss, encode, test_output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)

    return train_fn, val_fn
Example #19
Source File: conv_sup_cc_lbp.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network, encode_layer, input_var, aug_var, fea_var, target_var):
    prediction = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0005, momentum=0.975)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    test_output = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.binary_crossentropy(test_output, target_var).mean()

    val_fn = theano.function([input_var, aug_var, fea_var, target_var], [test_loss, encode, test_output])
    train_fn = theano.function([input_var, aug_var, fea_var, target_var], loss, updates=updates)

    return train_fn, val_fn
Example #20
Source File: test_layers.py    From ntm-lasagne with MIT License
def test_batch_size_none():
    input_var = T.tensor3('input')
    l_output = model(input_var, batch_size=None)
    posterior_fn = theano.function([input_var], get_output(l_output))

    example_input = np.random.rand(16, 30, 8)
    example_output = posterior_fn(example_input)

    assert example_output.shape == (16, 30, 8) 
Example #21
Source File: birdCLEF_evaluate.py    From BirdCLEF2017 with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Example #22
Source File: models.py    From drmad with MIT License
def __init__(self, x, y, args):
        self.params_theta = []
        self.params_lambda = []
        self.params_weight = []
        if args.dataset == 'mnist':
            input_size = (None, 1, 28, 28)
        elif args.dataset == 'cifar10':
            input_size = (None, 3, 32, 32)
        else:
            raise AssertionError
        layers = [ll.InputLayer(input_size)]
        self.penalty = theano.shared(np.array(0.))

        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #conv2
        layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #fc1
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
        self.add_params_to_self(args, layers[-1])
        #softmax
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=10, nonlinearity=nonlinearities.softmax))
        self.add_params_to_self(args, layers[-1])

        self.layers = layers
        self.y = ll.get_output(layers[-1], x, deterministic=False)
        self.prediction = T.argmax(self.y, axis=1)
        # self.penalty = penalty if penalty != 0. else T.constant(0.)
        print(self.params_lambda)
        # time.sleep(20)
        # cost function
        self.loss = T.mean(categorical_crossentropy(self.y, y))
        self.lossWithPenalty = T.add(self.loss, self.penalty)
        print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty) 
Example #23
Source File: birdCLEF_test.py    From BirdCLEF2017 with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net 
Example #24
Source File: WordDropout.py    From neural-dep-srl with Apache License 2.0
def __init__(self, incoming, previous_mask, p=0.5, rescale=False, shared_axes=(),
                 **kwargs):
        super(ConditionedWordDropoutLayer, self).__init__(incoming, **kwargs)
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.p = p
        self.rescale = rescale
        # self.retain = lo(alpha)/(lo(p)+lo(alpha))
        self.retain = T.constant(1) - p
        self.previous_mask = -(lo(previous_mask)-T.constant(1))
        self.shared_axes = tuple(shared_axes) 
Example #25
Source File: model.py    From BirdNET with MIT License
def test_function(net, layer_index=-1):

    log.p('COMPILING THEANO TEST FUNCTION...', new_line=False)

    prediction = l.get_output(l.get_all_layers(net)[layer_index], deterministic=True)    
    test_function = theano.function([l.get_all_layers(net)[0].input_var], prediction, allow_input_downcast=True)        

    log.p('DONE!')

    return test_function 
Example #26
Source File: util.py    From neural-dep-srl with Apache License 2.0
def mask_loss(loss, mask):
    return loss * lo(LL.FlattenLayer(mask, 1)) 
Example #27
Source File: util.py    From neural-dep-srl with Apache License 2.0
def ls(l, x):
    return lo(l).shape[x] 
Example #28
Source File: AED_test.py    From AcousticEventDetection with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Example #29
Source File: AED_eval.py    From AcousticEventDetection with MIT License
def getPredictionFuntion(net):
    net_output = l.get_output(net, deterministic=True)

    print "COMPILING THEANO TEST FUNCTION...",
    start = time.time()
    test_net = theano.function([l.get_all_layers(net)[0].input_var], net_output, allow_input_downcast=True)
    print "DONE! (", int(time.time() - start), "s )"

    return test_net

################# PREDICTION POOLING #################### 
Example #30
Source File: util.py    From cnn_workshop with Apache License 2.0
def get_activations(layer, x):
    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))

    return get_activity(x)
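Note that get_activations() compiles a fresh Theano function on every call, which is convenient for one-off inspection but wasteful inside a loop. A hypothetical call, where layer is any built Lasagne layer and images is a 4D float array matching the network's input shape (names are illustrative):

# inspect the activations that a batch of images produces at the given layer
activations = get_activations(layer, images.astype(theano.config.floatX))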