Python lasagne.layers.DenseLayer() Examples

The following are 30 code examples of lasagne.layers.DenseLayer(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module lasagne.layers, or try the search function.
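
Before the project examples, here is a minimal self-contained sketch of the basic pattern they all share: stack a DenseLayer on an InputLayer and ask Lasagne for the symbolic output. The shapes and names below are illustrative only, not taken from any of the projects.

import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, DenseLayer, get_output

# Illustrative sketch: one fully connected softmax layer on 100-dim inputs.
x = T.matrix('x')
l_in = InputLayer(shape=(None, 100), input_var=x)  # None = variable batch size
l_out = DenseLayer(l_in, num_units=10,
                   W=lasagne.init.GlorotUniform(),
                   nonlinearity=lasagne.nonlinearities.softmax)
prediction = get_output(l_out)  # symbolic Theano expression for the forward pass
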
Example #1
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, diag_val=0.9, offdiag_val=0.01, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid, W=lasagne.init.GlorotNormal(0.95), nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), nonlinearity=lasagne.nonlinearities.linear)
    l_rec = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.rectify, mask_input=l_mask, grad_clipping=100)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)

    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #2
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def OrthoInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, init_val=0.9, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in         = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid     = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  W=lasagne.init.GlorotNormal(0.95), nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid    = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, W=lasagne.init.Orthogonal(gain=init_val), nonlinearity=lasagne.nonlinearities.linear)
    l_rec        = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.tanh, mask_input=l_mask, grad_clipping=100)

    # Output Layer
    l_shp        = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense      = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out        = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #3
Source File: __init__.py    From aenet with BSD 3-Clause "New" or "Revised" License
def build_model(self):
        '''
        Build Acoustic Event Net model
        :return:
        '''

        # 'A' architecture, 41 classes
        nonlin = lasagne.nonlinearities.rectify
        net = {}
        net['input'] = InputLayer((None, feat_shape[0], feat_shape[1], feat_shape[2]))  # (channel, time, frequency)
        # ----------- 1st layer group ---------------
        net['conv1a'] = ConvLayer(net['input'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv1b'] = ConvLayer(net['conv1a'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool1'] = MaxPool2DLayer(net['conv1b'], pool_size=(1, 2))  # (time, freq)
        # ----------- 2nd layer group ---------------
        net['conv2a'] = ConvLayer(net['pool1'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv2b'] = ConvLayer(net['conv2a'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool2'] = MaxPool2DLayer(net['conv2b'], pool_size=(2, 2))  # (time, freq)
        # ----------- fully connected layer group ---------------
        net['fc5'] = DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlin)
        net['fc6'] = DenseLayer(net['fc5'], num_units=1024, nonlinearity=nonlin)
        net['prob'] = DenseLayer(net['fc6'], num_units=41, nonlinearity=lasagne.nonlinearities.softmax)

        return net 
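
The dictionary returned above holds named layers rather than a compiled function. A hedged sketch of how it might be consumed, where net is assumed to be the return value of build_model() and the variable names are illustrative:

import theano.tensor as T
import lasagne

x = T.tensor4('x')
# Map x to the single InputLayer and get the symbolic class probabilities.
prob = lasagne.layers.get_output(net['prob'], x, deterministic=True)
params = lasagne.layers.get_all_params(net['prob'], trainable=True)
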
Example #4
Source File: conv_sup_cc_4ch.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs');
    aug_var = T.matrix('aug_var');
    target_var = T.matrix('targets');

    ae = pickle.load(open('model_4ch/conv_ae.pkl', 'rb'));

    input_layer_index = [pair[0] for pair in ae.layers].index('input');
    first_layer = ae.get_all_layers()[input_layer_index + 1];
    input_layer = layers.InputLayer(shape=(None, 4, 32, 32), input_var = input_var);
    first_layer.input_layer = input_layer;

    encode_layer_index = [pair[0] for pair in ae.layers].index('encode_layer');
    encode_layer = ae.get_all_layers()[encode_layer_index];
    aug_layer = layers.InputLayer(shape=(None, classn), input_var = aug_var);

    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis = 1);
    hidden_layer = layers.DenseLayer(incoming = cat_layer, num_units = 100, nonlinearity = rectify);

    network = layers.DenseLayer(incoming = hidden_layer, num_units = classn, nonlinearity = sigmoid);

    return network, encode_layer, input_var, aug_var, target_var; 
Example #5
Source File: NN.py    From kusanagi with MIT License
def dropout_mlp(input_dims, output_dims, hidden_dims=[200]*4, batchsize=None,
                nonlinearities=nonlinearities.rectify,
                output_nonlinearity=nonlinearities.linear,
                W_init=lasagne.init.GlorotUniform('relu'),
                b_init=lasagne.init.Uniform(0.01),
                p=0.5, p_input=0.2,
                dropout_class=layers.DenseDropoutLayer,
                name='dropout_mlp'):
    if not isinstance(p, list):
        p = [p]*(len(hidden_dims))
    p = [p_input] + p

    network_spec = mlp(input_dims, output_dims, hidden_dims, batchsize,
                       nonlinearities, output_nonlinearity, W_init, b_init,
                       name)

    # first layer is input layer, we skip it
    for i in range(len(p)):
        layer_class, layer_args = network_spec[i+1]
        if layer_class == DenseLayer and p[i] != 0:
            layer_args['p'] = p[i]
            network_spec[i+1] = (dropout_class, layer_args)
    return network_spec 
Example #6
Source File: adda_network.py    From adda_mnist64 with MIT License
def network_classifier(self, input_var):

        network = {}
        network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
        network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
        network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
        network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
        network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
        network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
        network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
        network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
        network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
        network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
        network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')

        return network 
Example #7
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_128(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 128, 128), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis4.output_shape)
    # Conv Layer
    dis5 = batch_norm(Conv2DLayer(dis4, ndf*16, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv4:", dis5.output_shape)
    # Conv Layer
    dis6 = DenseLayer(dis5, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis6.output_shape)
    return dis6 
Example #8
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_toy(image=None, nd=512, GP_norm=None):
    Input = InputLayer(shape=(None, 2), input_var=image)
    print ("Dis input:", Input.output_shape)
    dis0 = DenseLayer(Input, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Dis fc0:", dis0.output_shape)
    if GP_norm is True:
        dis1 = DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis1 = batch_norm(DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu))
    print ("Dis fc1:", dis1.output_shape)
    # As with dis1, skip batch norm when the gradient penalty is used.
    if GP_norm is True:
        dis2 = DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis2 = batch_norm(DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu))
    print ("Dis fc2:", dis2.output_shape)
    disout = DenseLayer(dis2, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", disout.output_shape)
    return disout 
Example #9
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_generator_32(noise=None, ngf=128):
    # noise input 
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    #FC Layer 
    gnet0 = DenseLayer(InputNoise, ngf*4*4*4, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", gnet0.output_shape)
    #Reshape Layer
    gnet1 = ReshapeLayer(gnet0,([0],ngf*4,4,4))
    print ("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*2, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, 3, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=tanh)
    print ("Gen output:", gnet4.output_shape)
    return gnet4 
Example #10
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_discriminator_32(image=None,ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print ("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print ("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4,4), (2,2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print ("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", dis4.output_shape)
    return dis4 
Example #11
Source File: models_uncond.py    From EvolutionaryGAN with MIT License
def build_generator_64(noise=None, ngf=128):
    # noise input 
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    #FC Layer 
    gnet0 = DenseLayer(InputNoise, ngf*8*4*4, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", gnet0.output_shape)
    #Reshape Layer
    gnet1 = ReshapeLayer(gnet0,([0],ngf*8,4,4))
    print ("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1, ngf*8, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv2:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2, ngf*4, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv3:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3, ngf*4, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv4:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4, ngf*2, (4,4), (2,2), crop=1, W=Normal(0.02),nonlinearity=relu)
    print ("Gen deconv5:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5, 3, (3,3), (1,1), crop='same', W=Normal(0.02),nonlinearity=tanh)
    print ("Gen output:", gnet6.output_shape)
    return gnet6 
Example #12
Source File: nn_adagrad.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #13
Source File: nn_rmsprop_features.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden // 2, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #14
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def TanhRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1,
                    n_hid=200, wscale=1.0,
                    out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape

    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,
                          W=lasagne.init.HeNormal(0.95), nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid,
                           W=lasagne.init.HeNormal(gain=wscale), nonlinearity=lasagne.nonlinearities.linear)
    l_rec = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.tanh,
                                                mask_input=l_mask, grad_clipping=100)

    l_shp_1 = ReshapeLayer(l_rec, (-1, n_hid))
    l_shp_2 = ReshapeLayer(l_hid_hid, (-1, n_hid))

    l_shp = lasagne.layers.ElemwiseSumLayer((l_shp_1, l_shp_2), coeffs=(np.float32(0.2), np.float32(0.8)))

    # Output Layer
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.HeNormal(0.95), nonlinearity=out_nlin)

    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #15
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrentWithFastWeights(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1,
                    n_hid=200, diag_val=0.9, offdiag_val=0.01,
                    out_nlin=lasagne.nonlinearities.linear, gamma=0.9):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  
                          W=lasagne.init.GlorotNormal(0.95), 
                          nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, 
                           W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                           nonlinearity=lasagne.nonlinearities.linear)
    l_rec = CustomRecurrentLayerWithFastWeights(l_in, l_in_hid, l_hid_hid, 
                                                nonlinearity=lasagne.nonlinearities.rectify,
                                                mask_input=l_mask, grad_clipping=100, gamma=gamma)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #16
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def GRURecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, diag_val=0.9, offdiag_val=0.01, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in         = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)
        
    _, seqlen, _ = l_in.input_var.shape
    l_rec        = GRULayer(l_in, n_hid, 
                            resetgate=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                          W_hid=lasagne.init.GlorotNormal(0.05), 
                                                          W_cell=None, b=lasagne.init.Constant(0.)), 
                            updategate=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                           W_hid=lasagne.init.GlorotNormal(0.05), 
                                                           W_cell=None), 
                            hidden_update=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                              W_hid=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                                                              W_cell=None, nonlinearity=lasagne.nonlinearities.rectify), 
                            hid_init=lasagne.init.Constant(0.), backwards=False, learn_init=False,
                            gradient_steps=-1, grad_clipping=10., unroll_scan=False, precompute_input=True, mask_input=l_mask, only_return_final=False)

    # Output Layer
    l_shp        = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense      = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.05), nonlinearity=out_nlin)
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out        = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #17
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, 
                    n_hid=200, diag_val=0.9, offdiag_val=0.01,
                    out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  
                          W=lasagne.init.GlorotNormal(0.95), 
                          nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, 
                           W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                           nonlinearity=lasagne.nonlinearities.linear)
    l_rec = lasagne.layers.CustomRecurrentLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=lasagne.nonlinearities.rectify, mask_input=l_mask, grad_clipping=100)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #18
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def LeInitRecurrentWithFastWeights(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, 
                    n_hid=200, diag_val=0.9, offdiag_val=0.01,
                    out_nlin=lasagne.nonlinearities.linear, gamma=0.9):
    # Input Layer
    l_in = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)

    _, seqlen, _ = l_in.input_var.shape
    
    l_in_hid = DenseLayer(lasagne.layers.InputLayer((None, n_in)), n_hid,  
                          W=lasagne.init.GlorotNormal(0.95), 
                          nonlinearity=lasagne.nonlinearities.linear)
    l_hid_hid = DenseLayer(lasagne.layers.InputLayer((None, n_hid)), n_hid, 
                           W=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                           nonlinearity=lasagne.nonlinearities.linear)
    l_rec = CustomRecurrentLayerWithFastWeights(l_in, l_in_hid, l_hid_hid, 
                                                nonlinearity=lasagne.nonlinearities.rectify,
                                                mask_input=l_mask, grad_clipping=100, gamma=gamma)

    # Output Layer
    l_shp = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.95), nonlinearity=out_nlin)
    
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #19
Source File: bagging_nn_nesterov.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_out = DenseLayer(l_hidden2_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #20
Source File: nn_adagrad_pca.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #21
Source File: deep_conv_classification_alt51_heatmap.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #22
Source File: nn_adagrad_pca.py    From kaggle_otto with BSD 3-Clause "New" or "Revised" License
def build_model(self, input_dim):
        l_in = InputLayer(shape=(self.batch_size, input_dim))

        l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)

        l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
        l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)

        l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
        l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)

        l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)

        return l_out 
Example #23
Source File: models.py    From recurrent-memory with GNU Lesser General Public License v3.0
def GRURecurrent(input_var, mask_var=None, batch_size=1, n_in=100, n_out=1, n_hid=200, diag_val=0.9, offdiag_val=0.01, out_nlin=lasagne.nonlinearities.linear):
    # Input Layer
    l_in         = InputLayer((batch_size, None, n_in), input_var=input_var)
    if mask_var is None:
        l_mask = None
    else:
        l_mask = InputLayer((batch_size, None), input_var=mask_var)
        
    _, seqlen, _ = l_in.input_var.shape
    l_rec        = GRULayer(l_in, n_hid, 
                            resetgate=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                          W_hid=lasagne.init.GlorotNormal(0.05), 
                                                          W_cell=None, b=lasagne.init.Constant(0.)), 
                            updategate=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                           W_hid=lasagne.init.GlorotNormal(0.05), 
                                                           W_cell=None), 
                            hidden_update=lasagne.layers.Gate(W_in=lasagne.init.GlorotNormal(0.05), 
                                                              W_hid=LeInit(diag_val=diag_val, offdiag_val=offdiag_val), 
                                                              W_cell=None, nonlinearity=lasagne.nonlinearities.rectify), 
                            hid_init=lasagne.init.Constant(0.), backwards=False, learn_init=False,
                            gradient_steps=-1, grad_clipping=10., unroll_scan=False, precompute_input=True, mask_input=l_mask, only_return_final=False)

    # Output Layer
    l_shp        = ReshapeLayer(l_rec, (-1, n_hid))
    l_dense      = DenseLayer(l_shp, num_units=n_out, W=lasagne.init.GlorotNormal(0.05), nonlinearity=out_nlin)
    # To reshape back to our original shape, we can use the symbolic shape variables we retrieved above.
    l_out        = ReshapeLayer(l_dense, (batch_size, seqlen, n_out))

    return l_out, l_rec 
Example #24
Source File: NN.py    From kusanagi with MIT License
def mlp(input_dims, output_dims, hidden_dims=[200]*4, batchsize=None,
        nonlinearities=nonlinearities.rectify,
        output_nonlinearity=nonlinearities.linear,
        W_init=lasagne.init.GlorotUniform('relu'),
        b_init=lasagne.init.Uniform(0.01),
        name='mlp', **kwargs):
    if not isinstance(nonlinearities, list):
        nonlinearities = [nonlinearities]*len(hidden_dims)
    if not isinstance(W_init, list):
        W_init = [W_init]*(len(hidden_dims)+1)
    if not isinstance(b_init, list):
        b_init = [b_init]*(len(hidden_dims)+1)
    network_spec = []
    # input layer
    input_shape = (batchsize, input_dims)
    network_spec.append((InputLayer,
                         dict(shape=input_shape,
                              name=name+'_input')))

    # hidden layers
    for i in range(len(hidden_dims)):
        layer_type = DenseLayer
        network_spec.append((layer_type,
                             dict(num_units=hidden_dims[i],
                                  nonlinearity=nonlinearities[i],
                                  W=W_init[i],
                                  b=b_init[i],
                                  name=name+'_fc%d' % (i))))
    # output layer
    layer_type = DenseLayer
    network_spec.append((layer_type,
                         dict(num_units=output_dims,
                              nonlinearity=output_nonlinearity,
                              W=W_init[-1],
                              b=b_init[-1],
                              name=name+'_output')))
    return network_spec 
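
Note that mlp() (and dropout_mlp() in Example #5) returns a network specification, a list of (layer_class, kwargs) pairs, rather than instantiated layers. A minimal sketch of how such a spec could be turned into an actual network, assuming each non-input layer takes the previous layer as its first positional argument; build_from_spec is a hypothetical helper, not part of kusanagi:

def build_from_spec(network_spec):
    # Hypothetical helper: chain the (layer_class, kwargs) pairs together.
    layer_class, layer_args = network_spec[0]
    network = layer_class(**layer_args)  # the InputLayer
    for layer_class, layer_args in network_spec[1:]:
        network = layer_class(network, **layer_args)
    return network

# e.g. net = build_from_spec(mlp(input_dims=10, output_dims=2))
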
Example #25
Source File: deep_conv_classification_alt55.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = (layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #26
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x1_heatmap.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #27
Source File: deep_conv_classification_alt56.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = (layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #28
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x2_heatmap.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #29
Source File: deep_conv_classification_alt51_luad10in20_brca10.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var; 
Example #30
Source File: deep_conv_classification_alt51_luad10_luad10in20_brca10x1.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480,  filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));

    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);

    return network, input_var, target_var;