Python torch.nn.Tanh() Examples

The following are 28 code examples of torch.nn.Tanh(), extracted from open source projects. Each example notes its source project, author, file, and license. You may also want to check out all available functions and classes of the torch.nn module.
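
Before the project examples, here is a minimal standalone sketch of what nn.Tanh() does (the tensor values are illustrative): it is a stateless module that applies the element-wise hyperbolic tangent, mapping any real input into (-1, 1).

import torch
import torch.nn as nn

tanh = nn.Tanh()
x = torch.tensor([-2.0, 0.0, 2.0])
print(tanh(x))  # tensor([-0.9640,  0.0000,  0.9640]) -- squashed into (-1, 1)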
Example #1
Source Project: DDPAE-video-prediction   Author: jthsieh   File: decoder.py    License: MIT License
def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    super(ImageDecoder, self).__init__()

    ngf = ngf * (2 ** (n_layers - 2))  # start from the widest channel count
    layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False),  # 1x1 -> 4x4
              nn.BatchNorm2d(ngf),
              nn.ReLU(True)]

    for i in range(1, n_layers - 1):
      layers += [nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
                 nn.BatchNorm2d(ngf // 2),
                 nn.ReLU(True)]
      ngf = ngf // 2

    layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)]
    if activation == 'tanh':
      layers += [nn.Tanh()]
    elif activation == 'sigmoid':
      layers += [nn.Sigmoid()]
    else:
      raise NotImplementedError

    self.main = nn.Sequential(*layers) 
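
A brief usage sketch for the decoder above (the sizes are hypothetical, not from the source project): with n_layers=4 the stack upsamples a 1x1 latent map through 4x4, 8x8, and 16x16 up to 32x32, and the final nn.Tanh bounds the output image to (-1, 1).

decoder = ImageDecoder(input_size=128, n_channels=3, ngf=64, n_layers=4)
z = torch.randn(8, 128, 1, 1)   # a batch of latent codes as 1x1 feature maps
img = decoder.main(z)           # -> (8, 3, 32, 32), values in (-1, 1)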
Example #2
Source Project: Pytorch-Project-Template   Author: moemen95   File: dcgan_generator.py    License: MIT License
def __init__(self, config):
        super().__init__()
        self.config = config

        self.relu = nn.ReLU(inplace=True)

        self.deconv1 = nn.ConvTranspose2d(in_channels=self.config.g_input_size, out_channels=self.config.num_filt_g * 8, kernel_size=4, stride=1, padding=0, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(self.config.num_filt_g*8)

        self.deconv2 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 8, out_channels=self.config.num_filt_g * 4, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(self.config.num_filt_g*4)

        self.deconv3 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 4, out_channels=self.config.num_filt_g * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm3 = nn.BatchNorm2d(self.config.num_filt_g*2)

        self.deconv4 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 2, out_channels=self.config.num_filt_g , kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm4 = nn.BatchNorm2d(self.config.num_filt_g)

        self.deconv5 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g, out_channels=self.config.input_channels, kernel_size=4, stride=2, padding=1, bias=False)

        self.out = nn.Tanh()

        self.apply(weights_init) 
Example #3
Source Project: LanczosNetwork   Author: lrjconan   File: set2set.py    License: MIT License
def __init__(self, hidden_dim):
    """ Implementation of customized LSTM for set2set """
    super(Set2SetLSTM, self).__init__()
    self.hidden_dim = hidden_dim
    self.forget_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Sigmoid()])
    self.input_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Sigmoid()])
    self.output_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Sigmoid()])
    self.memory_gate = nn.Sequential(
        *[nn.Linear(2 * self.hidden_dim, self.hidden_dim),
          nn.Tanh()])

    self._init_param() 
Example #4
Source Project: cycleGAN-PyTorch   Author: arnab39   File: generators.py    License: MIT License
def __init__(self, input_nc=3, output_nc=3, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=True, num_blocks=6):
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        res_model = [nn.ReflectionPad2d(3),
                    conv_norm_relu(input_nc, ngf * 1, 7, norm_layer=norm_layer, bias=use_bias),
                    conv_norm_relu(ngf * 1, ngf * 2, 3, 2, 1, norm_layer=norm_layer, bias=use_bias),
                    conv_norm_relu(ngf * 2, ngf * 4, 3, 2, 1, norm_layer=norm_layer, bias=use_bias)]

        for i in range(num_blocks):
            res_model += [ResidualBlock(ngf * 4, norm_layer, use_dropout, use_bias)]

        res_model += [dconv_norm_relu(ngf * 4, ngf * 2, 3, 2, 1, 1, norm_layer=norm_layer, bias=use_bias),
                      dconv_norm_relu(ngf * 2, ngf * 1, 3, 2, 1, 1, norm_layer=norm_layer, bias=use_bias),
                      nn.ReflectionPad2d(3),
                      nn.Conv2d(ngf, output_nc, 7),
                      nn.Tanh()]
        self.res_model = nn.Sequential(*res_model) 
Example #5
Source Project: ConvLab   Author: ConvLab   File: classifier.py    License: MIT License
def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p, bidirectional, variable_lengths):
        super(EncoderGRUATTN, self).__init__(input_dropout_p=input_dropout_p, 
                                             rnn_cell=rnn_cell, 
                                             input_size=input_size, 
                                             hidden_size=hidden_size, 
                                             num_layers=num_layers, 
                                             output_dropout_p=output_dropout_p, 
                                             bidirectional=bidirectional)
        self.variable_lengths = variable_lengths
        self.nhid_attn = hidden_size
        self.output_size = hidden_size*2 if bidirectional else hidden_size

        # attention to combine selection hidden states
        self.attn = nn.Sequential(
            nn.Linear(2 * hidden_size, hidden_size), 
            nn.Tanh(), 
            nn.Linear(hidden_size, 1)
        ) 
Example #6
Source Project: SQL_Database_Optimization   Author: llSourcell   File: aggregator_predict.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, use_ca):
        super(AggPredictor, self).__init__()
        self.use_ca = use_ca

        self.agg_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        if use_ca:
            print("Using column attention on aggregator predicting")
            self.agg_col_name_enc = nn.LSTM(input_size=N_word,
                    hidden_size=N_h // 2, num_layers=N_depth,
                    batch_first=True, dropout=0.3, bidirectional=True)
            self.agg_att = nn.Linear(N_h, N_h)
        else:
            print("Not using column attention on aggregator predicting")
            self.agg_att = nn.Linear(N_h, 1)
        self.agg_out = nn.Sequential(nn.Linear(N_h, N_h),
                nn.Tanh(), nn.Linear(N_h, 6))
        self.softmax = nn.Softmax(dim=-1)  # explicit dim; the original Python 2 code relied on the deprecated default
Example #7
Source Project: deep_architect   Author: negrinho   File: main_pytorch.py    License: MIT License
def nonlinearity(h_nonlin_name):

    def Nonlinearity(nonlin_name):
        if nonlin_name == 'relu':
            m = nn.ReLU()
        elif nonlin_name == 'tanh':
            m = nn.Tanh()
        elif nonlin_name == 'elu':
            m = nn.ELU()
        else:
            raise ValueError

        return m

    return hpt.siso_pytorch_module_from_pytorch_layer_fn(
        Nonlinearity, {'nonlin_name': h_nonlin_name}) 
Example #8
Source Project: sgd-influence   Author: sato9hara   File: MyNet.py    License: MIT License
def __init__(self, device, m=[24, 12]):
        super(MnistAE, self).__init__()
        self.m = m
        self.encoder = nn.Sequential(
            nn.Conv2d(1, self.m[0], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Conv2d(self.m[0], self.m[1], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1, padding=0)
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(self.m[1], self.m[1], 5, stride=2, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[1], self.m[0], 4, stride=1, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[0], 1, 3, stride=1, padding=0),
            nn.Tanh()
        ) 
Example #9
Source Project: sgd-influence   Author: sato9hara   File: MyNet.py    License: MIT License
def __init__(self, device, m=[64, 32, 16]):
        super(CifarAE, self).__init__()
        self.m = m
        self.mm = np.array((0.4914, 0.4822, 0.4465))[np.newaxis, :, np.newaxis, np.newaxis]
        self.ss = np.array((0.2023, 0.1994, 0.2010))[np.newaxis, :, np.newaxis, np.newaxis]
        self.mm = torch.from_numpy(self.mm).float().to(device)
        self.ss = torch.from_numpy(self.ss).float().to(device)
        self.encoder = nn.Sequential(
            nn.Conv2d(3, self.m[0], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Conv2d(self.m[0], self.m[1], 3, stride=1, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1, padding=0)
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(self.m[1], self.m[1], 5, stride=2, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[1], self.m[0], 4, stride=1, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(self.m[0], 3, 3, stride=1, padding=0),
            nn.Tanh()
        ) 
Example #10
Source Project: video-caption-openNMT.pytorch   Author: xiadingZ   File: GlobalAttention.py    License: MIT License
def __init__(self, dim, coverage=False, attn_type="dot"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        self.attn_type = attn_type
        assert (self.attn_type in ["dot", "general", "mlp"]), (
                "Please select a valid attention type.")

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)

        self.sm = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False) 
Example #11
Source Project: ggnn.pytorch   Author: calebmah   File: model.py    License: MIT License
def __init__(self, state_dim, n_node, n_edge_types):
        super(Propogator, self).__init__()

        self.n_node = n_node
        self.n_edge_types = n_edge_types

        self.reset_gate = nn.Sequential(
            nn.Linear(state_dim*3, state_dim),
            nn.Sigmoid()
        )
        self.update_gate = nn.Sequential(
            nn.Linear(state_dim*3, state_dim),
            nn.Sigmoid()
        )
        # "tansform" (sic) is spelled this way in the source project; it is the
        # candidate-state transform of the GRU-style update
        self.tansform = nn.Sequential(
            nn.Linear(state_dim*3, state_dim),
            nn.Tanh()
        ) 
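
For context, here is a hedged sketch of the GRU-style update these three gates typically drive in forward() (following the GGNN propagation rule; the project's actual code may differ):

def propagate(gates, state_in, state_out, state_cur):
    # concatenate aggregated incoming, outgoing, and current node states
    a = torch.cat((state_in, state_out, state_cur), dim=2)
    r = gates.reset_gate(a)
    z = gates.update_gate(a)
    # nn.Tanh inside 'tansform' bounds the candidate state to (-1, 1)
    h_hat = gates.tansform(torch.cat((state_in, state_out, r * state_cur), dim=2))
    return (1 - z) * state_cur + z * h_hat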
Example #12
Source Project: StackGAN-Pytorch   Author: hanzhanggit   File: model.py    License: MIT License
def define_module(self):
        ninput = self.z_dim + self.ef_dim
        ngf = self.gf_dim
        # TEXT.DIMENSION -> GAN.CONDITION_DIM
        self.ca_net = CA_NET()

        # -> ngf x 4 x 4
        self.fc = nn.Sequential(
            nn.Linear(ninput, ngf * 4 * 4, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4),
            nn.ReLU(True))

        # ngf x 4 x 4 -> ngf/2 x 8 x 8
        self.upsample1 = upBlock(ngf, ngf // 2)
        # -> ngf/4 x 16 x 16
        self.upsample2 = upBlock(ngf // 2, ngf // 4)
        # -> ngf/8 x 32 x 32
        self.upsample3 = upBlock(ngf // 4, ngf // 8)
        # -> ngf/16 x 64 x 64
        self.upsample4 = upBlock(ngf // 8, ngf // 16)
        # -> 3 x 64 x 64
        self.img = nn.Sequential(
            conv3x3(ngf // 16, 3),
            nn.Tanh()) 
Example #13
Source Project: AIX360   Author: IBM   File: dipvae_utils.py    License: Apache License 2.0
def __init__(self, num_nodes=50, ip_dim=1, op_dim=1, activation_type='relu', args=None):
        super(FCNet, self).__init__()
        self.args = args
        if activation_type == 'relu':
            self.activation = nn.ReLU()
        elif activation_type == 'tanh':
            self.activation = nn.Tanh()
        else:
            print("Activation Type not supported")
            return
        layer = Linear
        self.fc_hidden = []
        self.fc1 = layer(ip_dim, num_nodes)
        self.bn1 = nn.BatchNorm1d(num_nodes)
        for _ in np.arange(self.args.num_layers - 1):
            self.fc_hidden.append(layer(num_nodes, num_nodes))
            self.fc_hidden.append(nn.BatchNorm1d(num_nodes))
            self.fc_hidden.append(self.activation)
        self.features = nn.Sequential(*self.fc_hidden)
        self.fc_out = layer(num_nodes, op_dim) 
Example #14
Source Project: pytorch-flows   Author: ikostrikov   File: flows.py    License: MIT License
def __init__(self,
                 num_inputs,
                 num_hidden,
                 num_cond_inputs=None,
                 act='relu',
                 pre_exp_tanh=False):
        super(MADE, self).__init__()

        activations = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'tanh': nn.Tanh}
        act_func = activations[act]

        input_mask = get_mask(
            num_inputs, num_hidden, num_inputs, mask_type='input')
        hidden_mask = get_mask(num_hidden, num_hidden, num_inputs)
        output_mask = get_mask(
            num_hidden, num_inputs * 2, num_inputs, mask_type='output')

        # MaskedLinear is a custom masked linear layer defined earlier in
        # flows.py; it is not part of torch.nn itself
        self.joiner = nn.MaskedLinear(num_inputs, num_hidden, input_mask,
                                      num_cond_inputs)

        self.trunk = nn.Sequential(act_func(),
                                   nn.MaskedLinear(num_hidden, num_hidden,
                                                   hidden_mask), act_func(),
                                   nn.MaskedLinear(num_hidden, num_inputs * 2,
                                                   output_mask)) 
Example #15
Source Project: SQLNet   Author: xiaojunxu   File: aggregator_predict.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, N_word, N_h, N_depth, use_ca):
        super(AggPredictor, self).__init__()
        self.use_ca = use_ca

        self.agg_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
                num_layers=N_depth, batch_first=True,
                dropout=0.3, bidirectional=True)
        if use_ca:
            print("Using column attention on aggregator predicting")
            self.agg_col_name_enc = nn.LSTM(input_size=N_word,
                    hidden_size=N_h // 2, num_layers=N_depth,
                    batch_first=True, dropout=0.3, bidirectional=True)
            self.agg_att = nn.Linear(N_h, N_h)
        else:
            print("Not using column attention on aggregator predicting")
            self.agg_att = nn.Linear(N_h, 1)
        self.agg_out = nn.Sequential(nn.Linear(N_h, N_h),
                nn.Tanh(), nn.Linear(N_h, 6))
        self.softmax = nn.Softmax(dim=-1)  # explicit dim; the original Python 2 code relied on the deprecated default
Example #16
Source Project: cmrc2019   Author: ymcui   File: modeling.py    License: Creative Commons Attribution Share Alike 4.0 International
def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh() 
Example #17
Source Project: hgraph2graph   Author: wengong-jin   File: rnn.py    License: MIT License
def __init__(self, input_size, hidden_size, depth):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.depth = depth

        self.W_i = nn.Sequential( nn.Linear(input_size + hidden_size, hidden_size), nn.Sigmoid() )
        self.W_o = nn.Sequential( nn.Linear(input_size + hidden_size, hidden_size), nn.Sigmoid() )
        self.W_f = nn.Sequential( nn.Linear(input_size + hidden_size, hidden_size), nn.Sigmoid() )
        self.W = nn.Sequential( nn.Linear(input_size + hidden_size, hidden_size), nn.Tanh() ) 
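
As a hedged sketch, here is one step of the standard LSTM recurrence these gates support (the project's actual forward may differ):

def lstm_step(cell, x, h, c):
    xh = torch.cat([x, h], dim=-1)
    i, o, f = cell.W_i(xh), cell.W_o(xh), cell.W_f(xh)
    c = f * c + i * cell.W(xh)   # nn.Tanh inside cell.W squashes the candidate cell state
    h = o * torch.tanh(c)
    return h, c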
Example #18
Source Project: hgraph2graph   Author: wengong-jin   File: encoder.py    License: MIT License
def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout):
        super(HierMPNEncoder, self).__init__()
        self.vocab = vocab
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.atom_size = atom_size = avocab.size()
        self.bond_size = bond_size = len(MolGraph.BOND_LIST) + MolGraph.MAX_POS

        self.E_c = nn.Sequential(
                nn.Embedding(vocab.size()[0], embed_size),
                nn.Dropout(dropout)
        )
        self.E_i = nn.Sequential(
                nn.Embedding(vocab.size()[1], embed_size),
                nn.Dropout(dropout)
        )
        self.W_c = nn.Sequential( 
                nn.Linear(embed_size + hidden_size, hidden_size), 
                nn.ReLU(),
                nn.Dropout(dropout)
        )
        self.W_i = nn.Sequential( 
                nn.Linear(embed_size + hidden_size, hidden_size), 
                nn.ReLU(),
                nn.Dropout(dropout)
        )

        # fixed one-hot lookup tables for atoms, bonds, and positions (kept on GPU)
        self.E_a = torch.eye(atom_size).cuda()
        self.E_b = torch.eye( len(MolGraph.BOND_LIST) ).cuda()
        self.E_apos = torch.eye( MolGraph.MAX_POS ).cuda()
        self.E_pos = torch.eye( MolGraph.MAX_POS ).cuda()

        self.W_root = nn.Sequential( 
                nn.Linear(hidden_size * 2, hidden_size), 
                nn.Tanh()  # root activation is tanh
        )
        self.tree_encoder = MPNEncoder(rnn_type, hidden_size + MolGraph.MAX_POS, hidden_size, hidden_size, depthT, dropout)
        self.inter_encoder = MPNEncoder(rnn_type, hidden_size + MolGraph.MAX_POS, hidden_size, hidden_size, depthT, dropout)
        self.graph_encoder = MPNEncoder(rnn_type, atom_size + bond_size, atom_size, hidden_size, depthG, dropout) 
Example #19
Source Project: VSE-C   Author: ExplorerFreda   File: model.py    License: MIT License
def __init__(self, pooling, hidden_size=1024, attention_size=128):
        super(Combiner, self).__init__()
        self.method = pooling
        if self.method == 'attn':
            self.ws1 = nn.Linear(hidden_size, attention_size, bias=False)
            self.ws2 = nn.Linear(attention_size, 1, bias=False)
            self.tanh = nn.Tanh() 
Example #20
Source Project: deep-learning-note   Author: wdxtub   File: 53_machine_translation.py    License: MIT License
def attention_model(input_size, attention_size):
    model = nn.Sequential(nn.Linear(input_size, attention_size, bias=False),
                          nn.Tanh(),
                          nn.Linear(attention_size, 1, bias=False))
    return model 
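
A short usage sketch (the shapes are illustrative, not from the source project): the returned module scores each hidden state with a scalar, and the scores are then softmax-normalized over the sequence dimension to obtain attention weights.

import torch
import torch.nn.functional as F

attn = attention_model(input_size=64, attention_size=10)
enc_states = torch.randn(7, 4, 64)   # (seq_len, batch, input_size)
scores = attn(enc_states)            # -> (7, 4, 1), one score per timestep
weights = F.softmax(scores, dim=0)   # attention weights over the sequence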
Example #21
Source Project: TaskBot   Author: EvilPsyCHo   File: cnn_attention.py    License: GNU General Public License v3.0
def __init__(self, attention_size, non_linear):
        super().__init__()
        if non_linear == "relu":
            self.non_linear = nn.ReLU()
        else:
            self.non_linear = nn.Tanh()
        self.fc = nn.Linear(attention_size, attention_size)
        uniform(self.fc.weight.data, -0.005, 0.005) 
Example #22
Source Project: mrc-for-flat-nested-ner   Author: pranciskus   File: bert_basic_layer.py    License: Apache License 2.0
def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh() 
Example #23
Source Project: cycleGAN-PyTorch   Author: arnab39   File: generators.py    License: MIT License
def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, 
                                innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc*2, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [nn.ReLU(True), upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [nn.LeakyReLU(0.2, True), downconv]
            up = [nn.ReLU(True), upconv, norm_layer(outer_nc)]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc*2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [nn.LeakyReLU(0.2, True), downconv, norm_layer(inner_nc)]
            up = [nn.ReLU(True), upconv, norm_layer(outer_nc)]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model) 
Example #24
Source Project: ConvLab   Author: ConvLab   File: classifier.py    License: MIT License
def __init__(self, input_dropout_p, input_size, output_size):
        super(FeatureProjecter, self).__init__()
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        self.sel_encoder = nn.Sequential(
            nn.Linear(input_size, output_size), 
            nn.Tanh()
        ) 
Example #25
Source Project: ConvLab   Author: ConvLab   File: encoders.py    License: MIT License
def __init__(self, goal_vocab_size, k, nembed, nhid, init_range):
        super(MlpGoalEncoder, self).__init__()

        # create separate embedding for counts and values
        self.cnt_enc = nn.Embedding(goal_vocab_size, nembed)
        self.val_enc = nn.Embedding(goal_vocab_size, nembed)

        self.encoder = nn.Sequential(
            nn.Tanh(),
            nn.Linear(k*nembed, nhid) 
        )

        self.cnt_enc.weight.data.uniform_(-init_range, init_range)
        self.val_enc.weight.data.uniform_(-init_range, init_range)
        self._init_cont(self.encoder, init_range) 
Example #26
Source Project: ConvLab   Author: ConvLab   File: encoders.py    License: MIT License
def __init__(self, goal_vocab_sizes, nhid, init_range):
        super(TaskMlpGoalEncoder, self).__init__()
        
        self.encoder = nn.ModuleList()
        for v_size in goal_vocab_sizes:
            domain_encoder = nn.Sequential(
                nn.Linear(v_size, nhid), 
                nn.Tanh()
            )
            self._init_cont(domain_encoder, init_range)
            self.encoder.append(domain_encoder) 
Example #27
Source Project: ScenarioMeta   Author: THUDM   File: utils.py    License: MIT License
def activation_method(name):
    """
    :param name: (str)
    :return: torch.nn.Module
    """
    name = name.lower()
    if name == "sigmoid":
        return nn.Sigmoid()
    elif name == "tanh":
        return nn.Tanh()
    elif name == "relu":
        return nn.ReLU()
    else:
        return nn.Sequential() 
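
A quick usage note (the calls below are illustrative): matching is case-insensitive, and unrecognized names fall through to an empty nn.Sequential(), which acts as the identity.

act = activation_method('Tanh')     # case-insensitive -> nn.Tanh()
print(act(torch.tensor([1.0])))     # tensor([0.7616])
print(activation_method('none'))    # Sequential() -- an identity passthrough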
Example #28
Source Project: ACGAN-PyTorch   Author: clvrai   File: network.py    License: MIT License
def __init__(self, ngpu, nz):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.nz = nz

        # first linear layer
        self.fc1 = nn.Linear(110, 768)
        # Transposed Convolution 2
        self.tconv2 = nn.Sequential(
            nn.ConvTranspose2d(768, 384, 5, 2, 0, bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(True),
        )
        # Transposed Convolution 3
        self.tconv3 = nn.Sequential(
            nn.ConvTranspose2d(384, 256, 5, 2, 0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
        )
        # Transposed Convolution 4
        self.tconv4 = nn.Sequential(
            nn.ConvTranspose2d(256, 192, 5, 2, 0, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )
        # Transposed Convolution 5
        self.tconv5 = nn.Sequential(
            nn.ConvTranspose2d(192, 64, 5, 2, 0, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
        )
        # Transposed Convolution 6
        self.tconv6 = nn.Sequential(
            nn.ConvTranspose2d(64, 3, 8, 2, 0, bias=False),
            nn.Tanh(),
        )