Python lasagne.layers.InputLayer() Examples

The following are 30 code examples of lasagne.layers.InputLayer(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the lasagne.layers module, or try the search function.
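Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all share: create an InputLayer bound to a symbolic Theano variable, stack layers on top of it, and compile the output expression. The variable names and sizes are illustrative, not taken from any of the projects below.

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, DenseLayer, get_output

# Symbolic input; None in the shape means a variable batch size.
X_var = T.matrix('X')
l_in = InputLayer(shape=(None, 100), input_var=X_var)
l_out = DenseLayer(l_in, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)

# Compile a forward pass and run it on random data.
predict_fn = theano.function([X_var], get_output(l_out, deterministic=True))
probs = predict_fn(np.random.rand(5, 100).astype(theano.config.floatX))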
Example #1
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, diag_val=0.9, offdiag_val=0.01, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid, W=lasagne.init.GlorotNormal(0.95), nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), nonlinearity=lasagne.nonlinearities.linear)
    l_rec = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.rectify, mask_input=l_mask, grad_clipping=100)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)

    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
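The function above returns the output layer and the recurrent layer; a minimal, hypothetical usage sketch (not part of the recurrent-memory project) compiles them into a prediction function, assuming LeInit is importable from the same module:

# Hypothetical usage of LeInitRecurrent; shapes follow the (batch, time, n_in) layout above.
import theano
import theano.tensor as T
import lasagne

input_var = T.tensor3('input')   # (batch_size, seqlen, n_in)
mask_var = T.matrix('mask')      # (batch_size, seqlen)
l_out, l_rec = LeInitRecurrent(input_var, mask_var=mask_var, batch_size=16, n_in=100, n_out=1, n_hid=200)
predict_fn = theano.function([input_var, mask_var], lasagne.layers.get_output(l_out))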
Example #2
Source File: model.py    From gogh-figure with GNU Affero General Public License v3.0
def setup_transform_net(self, input_var=None):
		transform_net = InputLayer(shape=self.shape, input_var=input_var)
		transform_net = style_conv_block(transform_net, self.num_styles, 32, 9, 1)
		transform_net = style_conv_block(transform_net, self.num_styles, 64, 3, 2)
		transform_net = style_conv_block(transform_net, self.num_styles, 128, 3, 2)
		for _ in range(5):
			transform_net = residual_block(transform_net, self.num_styles)
		transform_net = nn_upsample(transform_net, self.num_styles)
		transform_net = nn_upsample(transform_net, self.num_styles)

		if self.net_type == 0:
			transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, tanh)
			transform_net = ExpressionLayer(transform_net, lambda X: 150.*X, output_shape=None)
		elif self.net_type == 1:
			transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, sigmoid)

		self.network['transform_net'] = transform_net 
Example #3
Source File: servoing_policy_network.py    From visual_dynamics with MIT License
def __init__(self, input_shape, output_dim, servoing_pol,
                 name=None, input_var=None):

        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        if len(input_shape) == 3:
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var)
            l_hid = L.reshape(l_in, ([0],) + input_shape)
        elif len(input_shape) == 2:
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var)
            input_shape = (1,) + input_shape
            l_hid = L.reshape(l_in, ([0],) + input_shape)
        else:
            l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
            l_hid = l_in

        l_out = TheanoServoingPolicyLayer(l_hid, servoing_pol)
        self._l_in = l_in
        self._l_out = l_out
        self._input_var = l_in.input_var 
Example #4
Source File: adda_network.py    From adda_mnist64 with MIT License
def network_classifier(self, input_var):

        network = {}
        network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
        network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
        network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
        network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
        network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
        network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
        network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
        network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
        network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
        network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
        network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')

        return network 
Example #5
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_128(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 128, 128), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis4.output_shape)
    # Conv Layer
    dis5 = batch_norm(Conv2DLayer(dis4, ndf*16, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv4:", dis5.output_shape)
    # Conv Layer
    dis6 = DenseLayer(dis5, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis6.output_shape)
    return dis6 
Example #6
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_conv2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = L.Conv2DLayer(l_x, num_filters, filter_size=filter_size, stride=1, pad='same',
                           flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = conv2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-6) 
Example #7
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_generator_64(noise=None, ngf=128):
    # noise input 
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    #FC Layer 
    gnet0 = DenseLayer(InputNoise, ngf*8*4*4, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", gnet0.output_shape)
    #Reshape Layer
    gnet1 = ReshapeLayer(gnet0,([0],ngf*8,4,4))
    print ("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv2:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf*4, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv3:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv4:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4, ngf*2, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv5:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5, 3, (3,3), (1,1), crop='same', W=Normal(0.02),nonlinearity=tanh)
    print ("Gen output:", gnet6.output_shape)
    return gnet6 
Example #8
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_32(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis4.output_shape)
    return dis4 
Example #9
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_generator_32(noise=None, ngf=128):
    # noise input 
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    #FC Layer 
    gnet0 = DenseLayer(InputNoise, ngf*4*4*4, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", gnet0.output_shape)
    #Reshape Layer
    gnet1 = ReshapeLayer(gnet0,([0],ngf*4,4,4))
    print ("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*2, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, 3, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=tanh)
    print ("Gen output:", gnet4.output_shape)
    return gnet4 
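A minimal, hypothetical sketch (not part of models_uncond.py) of wiring the 32x32 generator and discriminator above together for a GAN forward pass; lasagne.layers.get_output accepts an expression via its inputs argument, which substitutes for the discriminator's InputLayer:

import theano.tensor as T
import lasagne

noise_var = T.matrix('noise')                     # (batch, 100)
generator = build_generator_32(noise=noise_var)   # outputs (batch, 3, 32, 32)
fake_images = lasagne.layers.get_output(generator)

discriminator = build_discriminator_32()          # image input left unbound
fake_score = lasagne.layers.get_output(discriminator, inputs=fake_images)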
Example #10
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_toy(image=None, nd=512, GP_norm=None):
    Input = InputLayer(shape=(None, 2), input_var=image)
    print ("Dis input:", Input.output_shape)
    dis0 = DenseLayer(Input, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Dis fc0:", dis0.output_shape)
    if GP_norm is True:
        dis1 = DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis1 = batch_norm(DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu))
    print ("Dis fc1:", dis1.output_shape)
    if GP_norm is True:
        dis2 = batch_norm(DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu))
    else:
        dis2 = DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Dis fc2:", dis2.output_shape)
    disout = DenseLayer(dis2, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", disout.output_shape)
    return disout 
Example #11
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_locally_connected2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.LocallyConnected2DLayer(l_x, num_filters, filter_size=filter_size, stride=1, pad='same',
                                        flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("locally connected time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = locally_connected2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop locally connected time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-6) 
Example #12
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def OrthoInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, init_val=0.9, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in         = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid     = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  W=lasagne.init.GlorotNormal(0.95), nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid    = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, W=lasagne.init.Orthogonal(gain=init_val), nonlinearity=lasagne.nonlinearities.linear)
    l_rec        = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.tanh, mask_input=l_mask, grad_clipping=100)

    # Output Layer
    l_shp        = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense      = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out        = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #13
Source File: __init__.py    From aenet with BSD 3-Clause "New" or "Revised" License
def build_model(self):
        '''
        Build Acoustic Event Net model
        :return:
        '''

        # A architecture 41 classes
        nonlin = lasagne.nonlinearities.rectify
        net = {}
        net['input'] = InputLayer((None, feat_shape[0], feat_shape[1], feat_shape[2]))  # channel, time, frequency
        # ----------- 1st layer group ---------------
        net['conv1a'] = ConvLayer(net['input'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv1b'] = ConvLayer(net['conv1a'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool1'] = MaxPool2DLayer(net['conv1b'], pool_size=(1, 2))  # (time, freq)
        # ----------- 2nd layer group ---------------
        net['conv2a'] = ConvLayer(net['pool1'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv2b'] = ConvLayer(net['conv2a'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool2'] = MaxPool2DLayer(net['conv2b'], pool_size=(2, 2))  # (time, freq)
        # ----------- fully connected layer group ---------------
        net['fc5'] = DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlin)
        net['fc6'] = DenseLayer(net['fc5'], num_units=1024, nonlinearity=nonlin)
        net['prob'] = DenseLayer(net['fc6'], num_units=41, nonlinearity=lasagne.nonlinearities.softmax)

        return net 
Example #14
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_channelwise_locally_connected2d(x_shape, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.LocallyConnected2DLayer(l_x, x_shape[0], filter_size=filter_size, channelwise=True,
                                        stride=1, pad='same', flip_filters=flip_filters,
                                        untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = channelwise_locally_connected2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-7) 
Example #15
Source File: net_theano.py    From visual_dynamics with MIT License
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
    x_shape, u_shape = input_shapes
    X_var = X_var or T.tensor4('X')
    U_var = U_var or T.matrix('U')
    X_diff_var = X_diff_var or T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)

    l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
    l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
    l_y = L.flatten(l_x)
    l_y_diff_pred = L.flatten(l_x_diff_pred)

    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred), ('y', l_y), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Example #16
Source File: enhance.py    From neural-enhance with GNU Affero General Public License v3.0
def __init__(self):
        self.network = collections.OrderedDict()
        self.network['img'] = InputLayer((None, 3, None, None))
        self.network['seed'] = InputLayer((None, 3, None, None))

        config, params = self.load_model()
        self.setup_generator(self.last_layer(), config)

        if args.train:
            concatenated = lasagne.layers.ConcatLayer([self.network['img'], self.network['out']], axis=0)
            self.setup_perceptual(concatenated)
            self.load_perceptual()
            self.setup_discriminator()
        self.load_generator(params)
        self.compile()

    #------------------------------------------------------------------------------------------------------------------
    # Network Configuration
    #------------------------------------------------------------------------------------------------------------------ 
Example #17
Source File: eeg_cnn_lib.py    From EEGLearn with GNU General Public License v2.0
def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):
    """
    Builds a VGG-style CNN network followed by a fully-connected layer and a softmax layer.
    Stacks are separated by a maxpool layer, and the number of kernels in each stack is
    twice the number in the previous stack.
    input_var: Theano variable for input to the network
    outputs: pointer to the output of the last layer of network (softmax)

    :param input_var: theano variable as input to the network
    :param w_init: Initial weight values
    :param n_layers: number of layers in each stack. An array of integers with each
                    value corresponding to the number of layers in each stack.
                    (e.g. [4, 2, 1] == 3 stacks with 4, 2, and 1 layers in each).
    :param n_filters_first: number of filters in the first layer
    :param imsize: Size of the image
    :param n_colors: Number of color channels (depth)
    :return: a pointer to the output of the last layer, plus the list of convolutional layer weights
    """
    weights = []        # Keeps the weights for all layers
    count = 0
    # If no initial weight is given, initialize with GlorotUniform
    if w_init is None:
        w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)
    # Input layer
    network = InputLayer(shape=(None, n_colors, imsize, imsize),
                                        input_var=input_var)
    for i, s in enumerate(n_layers):
        for l in range(s):
            network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),
                          W=w_init[count], pad='same')
            count += 1
            weights.append(network.W)
        network = MaxPool2DLayer(network, pool_size=(2, 2))
    return network, weights 
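A minimal, hypothetical usage sketch for build_cnn (the class count and dropout rate are illustrative, not from EEGLearn): attach a softmax classifier on top of the returned convolutional stack.

import theano.tensor as T
import lasagne
from lasagne.layers import DenseLayer

input_var = T.tensor4('images')   # (batch, n_colors, imsize, imsize)
convnet, weights = build_cnn(input_var=input_var, imsize=32, n_colors=3)
network = DenseLayer(lasagne.layers.dropout(convnet, p=0.5),
                     num_units=4,  # assumed number of classes
                     nonlinearity=lasagne.nonlinearities.softmax)
prediction = lasagne.layers.get_output(network)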
Example #18
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def GRURecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, diag_val=0.9, offdiag_val=0.01, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in         = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)
        
    _, seqlen, _ = l_in.input_var.shape
    l_rec        = GRULayer(l_in, n_hid, 
                            resetgate=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                          W_hid=lasagne.init.GlorotNormal(0.05), 
                                                          W_cell=None, b=lasagne.init.Constant(0.)), 
                            updategate=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                           W_hid=lasagne.init.GlorotNormal(0.05), 
                                                           W_cell=None), 
                            hidden_update=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                              W_hid=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                                                              W_cell=None, nonlinearity=lasagne.nonlinearities.rectify), 
                            hid_init = lasagne.init.Constant(0.), backwards=False, learn_init=False, 
                            gradient_steps=-1, grad_clipping=10., unroll_scan=False, precompute_input=True, mask_input=l_mask, only_return_final=False)

    # Output Layer
    l_shp        = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense      = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.05), nonlinearity=out_nlin)
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out        = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #19
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, 
                    n_hid=200, diag_val=0.9, offdiag_val=0.01,
                    out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  
                          W=lasagne.init.GlorotNormal(0.95), 
                          nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, 
                           W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                           nonlinearity=lasagne.nonlinearities.linear)
    l_rec = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.rectify, mask_input=l_mask, grad_clipping=100)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #20
Source File: nn_adagrad_pca.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #21
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrentWithFastWeights(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1,
                    n_hid=200, diag_val=0.9, offdiag_val=0.01,
                    out_nlin=lasagne.nonlinearities.linear, gamma=0.9):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  
                          W=lasagne.init.GlorotNormal(0.95), 
                          nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, 
                           W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                           nonlinearity=lasagne.nonlinearities.linear)
    l_rec = CustomRecurrentLayerWithFastWeights(l_in, l_in_hid, l_hid_hid, 
                                                nonlinearity=lasagne.nonlinearities.rectify,
                                                mask_input=l_mask, grad_clipping=100, gamma=gamma)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #22
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def TanhRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1,
                    n_hid=200, wscale=1.0,
                    out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape

    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,
                          W=lasagne.init.HeNormal(0.95), nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid,
                           W=lasagne.init.HeNormal(gain=wscale), nonlinearity=lasagne.nonlinearities.linear)
    l_rec = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.tanh,
                                                mask_input=l_mask, grad_clipping=100)

    l_shp_1 =  ReshapeLayer(l_rec, (-1, n_hid))
    l_shp_2 =  ReshapeLayer(l_hid_hid, (-1, n_hid))

    l_shp = lasagne.layers.ElemwiseSumLayer((l_shp_1,l_shp_2),coeffs=(np.float32(0.2),np.float32(0.8)))

    # Output Layer
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.HeNormal(0.95), nonlinearity=out_nlin)

    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #23
Source File: bagging_nn_rmsprop.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_out = DenseLayer(l_hidden2_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #24
Source File: res_net_blocks.py    From dcase_task2 with MIT License
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)

    Formula to figure out depth: 6n + 2
    """

    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)

    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))

    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)

    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)

    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)

    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)

    return network 
Example #25
Source File: models.py    From drmad with MIT License
def __init__(self, x, y, args):
        self.params_theta = []
        self.params_lambda = []
        self.params_weight = []
        if args.dataset == 'mnist':
            input_size = (None, 1, 28, 28)
        elif args.dataset == 'cifar10':
            input_size = (None, 3, 32, 32)
        else:
            raise AssertionError
        layers = [ll.InputLayer(input_size)]
        self.penalty = theano.shared(np.array(0.))

        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #fc1
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
        self.add_params_to_self(args, layers[-1])
        #softmax
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=10, nonlinearity=nonlinearities.softmax))
        self.add_params_to_self(args, layers[-1])

        self.layers = layers
        self.y = ll.get_output(layers[-1], x, deterministic=False)
        self.prediction = T.argmax(self.y, axis=1)
        # self.penalty = penalty if penalty != 0. else T.constant(0.)
        print(self.params_lambda)
        # time.sleep(20)
        # cost function
        self.loss = T.mean(categorical_crossentropy(self.y, y))
        self.lossWithPenalty = T.add(self.loss, self.penalty)
        print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty) 
Example #26
Source File: nn_rmsprop_features.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden / 2, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden / 2, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #27
Source File: net.py    From neuralnilm with Apache License 2.0
def build_net(input_shape, layers):
    # Input layer
    layer = InputLayer(shape=input_shape)
    for layer_config in layers:
        layer_type = layer_config.pop('type')
        layer = layer_type(layer, **layer_config)

    return layer 
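A minimal, hypothetical layer specification for build_net (not from neuralnilm): each dict names the layer class under 'type' and passes its remaining keys as keyword arguments to that class.

from lasagne.layers import DenseLayer
from lasagne.nonlinearities import rectify, linear

layers_config = [
    {'type': DenseLayer, 'num_units': 50, 'nonlinearity': rectify},
    {'type': DenseLayer, 'num_units': 1, 'nonlinearity': linear},
]
net = build_net(input_shape=(64, 100), layers=layers_config)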
Example #28
Source File: bagging_nn_nesterov.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_out = DenseLayer(l_hidden2_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #29
Source File: NN.py    From kusanagi with MIT License
def mlp(input_dims, output_dims, hidden_dims=[200]*4, batchsize=None,
        nonlinearities=nonlinearities.rectify,
        output_nonlinearity=nonlinearities.linear,
        W_init=lasagne.init.GlorotUniform('relu'),
        b_init=lasagne.init.Uniform(0.01),
        name='mlp', **kwargs):
    if not isinstance(nonlinearities, list):
        nonlinearities = [nonlinearities]*len(hidden_dims)
    if not isinstance(W_init, list):
        W_init = [W_init]*(len(hidden_dims)+1)
    if not isinstance(b_init, list):
        b_init = [b_init]*(len(hidden_dims)+1)
    network_spec = []
    # input layer
    input_shape = (batchsize, input_dims)
    network_spec.append((InputLayer,
                         dict(shape=input_shape,
                              name=name+'_input')))

    # hidden layers
    for i in range(len(hidden_dims)):
        layer_type = DenseLayer
        network_spec.append((layer_type,
                             dict(num_units=hidden_dims[i],
                                  nonlinearity=nonlinearities[i],
                                  W=W_init[i],
                                  b=b_init[i],
                                  name=name+'_fc%d' % (i))))
    # output layer
    layer_type = DenseLayer
    network_spec.append((layer_type,
                         dict(num_units=output_dims,
                              nonlinearity=output_nonlinearity,
                              W=W_init[-1],
                              b=b_init[-1],
                              name=name+'_output')))
    return network_spec 
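mlp returns a declarative spec rather than instantiated layers; a minimal, hypothetical builder (kusanagi has its own machinery, this is only illustrative) could turn the spec into a Lasagne network like this:

def build_from_spec(network_spec):
    layer = None
    for layer_class, kwargs in network_spec:
        # The first entry is the InputLayer spec; later entries take the previous layer as incoming.
        layer = layer_class(**kwargs) if layer is None else layer_class(layer, **kwargs)
    return layer

output_layer = build_from_spec(mlp(input_dims=10, output_dims=2, hidden_dims=[64, 64]))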
Example #30
Source File: nn_adagrad.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out