Python torch.nn.MaxPool1d() Examples

The following are 30 code examples of torch.nn.MaxPool1d(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
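Before the project examples, here is a minimal, self-contained sketch of nn.MaxPool1d itself; the batch size, channel count, and sequence length below are illustrative only, not taken from any example on this page.

import torch
import torch.nn as nn

x = torch.randn(4, 16, 50)                           # (batch, channels, length)

pool = nn.MaxPool1d(kernel_size=2, stride=2)         # output length = floor((50 - 2) / 2) + 1 = 25
print(pool(x).shape)                                 # torch.Size([4, 16, 25])

global_pool = nn.MaxPool1d(kernel_size=x.size(-1))   # pool over the whole sequence
print(global_pool(x).shape)                          # torch.Size([4, 16, 1])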
Example #1
Source File: models.py    From IGMC with MIT License    7 votes
def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1], k=30, 
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(DGCNN, self).__init__(
            dataset, gconv, latent_dim, regression, adj_dropout, force_undirected
        )
        if k < 1:  # transform percentile to number
            node_nums = sorted([g.num_nodes for g in dataset])
            k = node_nums[int(math.ceil(k * len(node_nums)))-1]
            k = max(10, k)  # no smaller than 10
        self.k = int(k)
        print('k used in sortpooling is:', self.k)
        conv1d_channels = [16, 32]
        conv1d_activation = nn.ReLU()
        self.total_latent_dim = sum(latent_dim)
        conv1d_kws = [self.total_latent_dim, 5]
        self.conv1d_params1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
        self.maxpool1d = nn.MaxPool1d(2, 2)
        self.conv1d_params2 = Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)
        dense_dim = int((k - 2) / 2 + 1)
        self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
        self.lin1 = Linear(self.dense_dim, 128) 
Example #2
Source File: mnist.py    From dgl with Apache License 2.0    6 votes
def __init__(self,
                 k,
                 in_feats,
                 hiddens,
                 out_feats):
        super(ChebNet, self).__init__()
        self.pool = nn.MaxPool1d(2)
        self.layers = nn.ModuleList()
        self.readout = MaxPooling()

        # Input layer
        self.layers.append(
            ChebConv(in_feats, hiddens[0], k))

        for i in range(1, len(hiddens)):
            self.layers.append(
                ChebConv(hiddens[i - 1], hiddens[i], k))

        self.cls = nn.Sequential(
            nn.Linear(hiddens[-1], out_feats),
            nn.LogSoftmax()
        ) 
Example #3
Source File: networks.py    From MeshCNN with MIT License    6 votes
def __init__(self, norm_layer, nf0, conv_res, nclasses, input_res, pool_res, fc_n,
                 nresblocks=3):
        super(MeshConvNet, self).__init__()
        self.k = [nf0] + conv_res
        self.res = [input_res] + pool_res
        norm_args = get_norm_args(norm_layer, self.k[1:])

        for i, ki in enumerate(self.k[:-1]):
            setattr(self, 'conv{}'.format(i), MResConv(ki, self.k[i + 1], nresblocks))
            setattr(self, 'norm{}'.format(i), norm_layer(**norm_args[i]))
            setattr(self, 'pool{}'.format(i), MeshPool(self.res[i + 1]))


        self.gp = torch.nn.AvgPool1d(self.res[-1])
        # self.gp = torch.nn.MaxPool1d(self.res[-1])
        self.fc1 = nn.Linear(self.k[-1], fc_n)
        self.fc2 = nn.Linear(fc_n, nclasses) 
Example #4
Source File: resnet.py    From ecg_pytorch with Apache License 2.0    6 votes
def __init__(self, block, layers, num_classes=55):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv1d(8, 64, kernel_size=15, stride=2, padding=7,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.kernel_size[0] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #5
Source File: module.py    From Tacotron-pytorch with MIT License    6 votes
def __init__(self, in_dim, K=16, hidden_sizes=[128, 128]):
        super(CBHG, self).__init__()
        self.in_dim = in_dim
        self.relu = nn.ReLU()
        self.conv1d_banks = nn.ModuleList(
                [BatchNormConv1d(in_dim, in_dim, kernel_size=k, stride=1,
                    padding=k//2, activation=self.relu)
                for k in range(1, K+1)])
        self.pool1d = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)

        in_sizes = [K * in_dim] + hidden_sizes[:-1]
        activations = [self.relu] * (len(hidden_sizes) - 1) + [None]
        self.conv1d_projs = nn.ModuleList(
                [BatchNormConv1d(in_size, out_size, kernel_size=3,
                    stride=1, padding=1, activation=act)
                    for in_size, out_size, act in zip(in_sizes, hidden_sizes, activations)])

        self.pre_highway_proj = nn.Linear(hidden_sizes[-1], in_dim, bias=False)
        self.highways = nn.ModuleList(
                [Highway(in_dim, in_dim) for _ in range(4)])
        self.gru = nn.GRU(
                in_dim, in_dim, num_layers=1, batch_first=True, bidirectional=True) 
Example #6
Source File: tacotron.py    From ZeroSpeech-TTS-without-T with MIT License    6 votes
def __init__(self, in_dim, K=16, projections=[128, 128]):
		
		super(CBHG, self).__init__()
		self.in_dim = in_dim
		self.relu = nn.ReLU()
		self.conv1d_banks = nn.ModuleList(
			[BatchNormConv1d(in_dim, in_dim, kernel_size=k, stride=1,
							 padding=k // 2, activation=self.relu)
			 for k in range(1, K + 1)])
		self.max_pool1d = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)

		in_sizes = [K * in_dim] + projections[:-1]
		activations = [self.relu] * (len(projections) - 1) + [None]
		self.conv1d_projections = nn.ModuleList(
			[BatchNormConv1d(in_size, out_size, kernel_size=3, stride=1,
							 padding=1, activation=ac)
			 for (in_size, out_size, ac) in zip(
				 in_sizes, projections, activations)])

		self.pre_highway = nn.Linear(projections[-1], in_dim, bias=False)
		self.highways = nn.ModuleList(
			[Highway(in_dim, in_dim) for _ in range(4)])

		self.gru = nn.GRU(
			in_dim, in_dim, 1, batch_first=True, bidirectional=True) 
Example #8
Source File: pool_ending_classifier.py    From l2w with GNU General Public License v3.0    6 votes
def forward(self, context, endings, itos=None):
        # context not used.
        ends = endings[0]
        ends_ls = endings[1]

        end_seq_len = ends.size()[0]
        end = ends.view(end_seq_len, -1)
        end_batch_size = end.size()[1]
        maxpool_end = nn.MaxPool1d(end_seq_len)

        end_embed = self.embed_seq(end)
        end_pooled = maxpool_end(end_embed).view(end_batch_size, self.embedding_dim)
        #end_pooled = torch.sum(end_conv, 2)/end_seq_len

        final = self.fc(end_pooled).view(-1)
        return final 
Example #9
Source File: mnist.py    From dgl with Apache License 2.0    6 votes
def __init__(self,
                 n_kernels,
                 in_feats,
                 hiddens,
                 out_feats):
        super(MoNet, self).__init__()
        self.pool = nn.MaxPool1d(2)
        self.layers = nn.ModuleList()
        self.readout = MaxPooling()

        # Input layer
        self.layers.append(
            GMMConv(in_feats, hiddens[0], 2, n_kernels))

        # Hidden layer
        for i in range(1, len(hiddens)):
            self.layers.append(GMMConv(hiddens[i - 1], hiddens[i], 2, n_kernels))

        self.cls = nn.Sequential(
            nn.Linear(hiddens[-1], out_feats),
            nn.LogSoftmax()
        ) 
Example #10
Source File: bert_for_multi_label.py    From Multi-Label-Text-Classification-for-Chinese with MIT License    6 votes
def __init__(self, config):
        super(BertPreTrainedModel, self).__init__(config)
        config.rnn_hidden = basic_config.rcnn.rnn_hidden
        config.num_layers = basic_config.rcnn.num_layers
        config.kernel_size = basic_config.rcnn.kernel_size
        config.lstm_dropout = basic_config.rcnn.dropout

        self.bert = BertModel(config)
        for param in self.bert.parameters():
            param.requires_grad = True
        self.lstm = nn.LSTM(config.hidden_size,
                            config.rnn_hidden,
                            config.num_layers,
                            bidirectional=True,
                            batch_first=True,
                            dropout=config.lstm_dropout)
        self.maxpool = nn.MaxPool1d(config.kernel_size)
        self.fc = nn.Linear(config.rnn_hidden * 2 +
                            config.hidden_size, config.num_labels) 
Example #11
Source File: pointnet_partseg.py    From dgl with Apache License 2.0    6 votes
def __init__(self, input_dims=3, num_points=2048):
        super(TransformNet, self).__init__()
        self.conv = nn.ModuleList()
        self.conv.append(nn.Conv1d(input_dims, 64, 1))
        self.conv.append(nn.Conv1d(64, 128, 1))
        self.conv.append(nn.Conv1d(128, 1024, 1))

        self.bn = nn.ModuleList()
        self.bn.append(nn.BatchNorm1d(64))
        self.bn.append(nn.BatchNorm1d(128))
        self.bn.append(nn.BatchNorm1d(1024))

        self.maxpool = nn.MaxPool1d(num_points)
        self.pool_feat_len = 1024

        self.mlp2 = nn.ModuleList()
        self.mlp2.append(nn.Linear(1024, 512))
        self.mlp2.append(nn.Linear(512, 256))

        self.bn2 = nn.ModuleList()
        self.bn2.append(nn.BatchNorm1d(512))
        self.bn2.append(nn.BatchNorm1d(256))

        self.input_dims = input_dims
        self.mlp_out = nn.Linear(256, input_dims * input_dims) 
Example #12
Source File: ch_pool.py    From attacut with MIT License    6 votes
def __init__(self, data_config, model_config="emb:16|l1:64"):
        super(Model, self).__init__()

        window_size = data_config['window_size']
        no_chars = data_config['num_tokens']
        max_length = data_config['max_seq_length']

        config = utils.parse_model_params(model_config)
        emb_dim = config['emb']
        
        self.embeddings = nn.Embedding(
            no_chars,
            emb_dim,
            padding_idx=0
        )

        self.num_embs = 2*window_size + 1
        self.pooling = nn.MaxPool1d(max_length)

        self.linear1 = nn.Linear(self.num_embs*emb_dim, config['l1'])
        self.linear2 = nn.Linear(config['l1'], 1) 
Example #13
Source File: dpcnn.py    From fastNLP with Apache License 2.0    6 votes
def __init__(self, init_embed, num_cls, n_filters=256,
                 kernel_size=3, n_layers=7, embed_dropout=0.1, cls_dropout=0.1):
        super().__init__()
        self.region_embed = RegionEmbedding(
            init_embed, out_dim=n_filters, kernel_sizes=[1, 3, 5])
        embed_dim = self.region_embed.embedding_dim
        self.conv_list = nn.ModuleList()
        for i in range(n_layers):
            self.conv_list.append(nn.Sequential(
                nn.ReLU(),
                nn.Conv1d(n_filters, n_filters, kernel_size,
                          padding=kernel_size//2),
                nn.Conv1d(n_filters, n_filters, kernel_size,
                          padding=kernel_size//2),
            ))
        self.pool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.embed_drop = nn.Dropout(embed_dropout)
        self.classfier = nn.Sequential(
            nn.Dropout(cls_dropout),
            nn.Linear(n_filters, num_cls),
        )
        self.reset_parameters() 
Example #14
Source File: pooling.py    From fastNLP with Apache License 2.0    6 votes
def forward(self, x):
        if self.dimension == 1:
            x = torch.transpose(x, 1, 2)  # [N,L,C] -> [N,C,L]
            pooling = nn.MaxPool1d(
                stride=self.stride, padding=self.padding, dilation=self.dilation,
                kernel_size=self.kernel_size if self.kernel_size is not None else x.size(-1),
                return_indices=False, ceil_mode=self.ceil_mode
            )
        elif self.dimension == 2:
            pooling = nn.MaxPool2d(
                stride=self.stride, padding=self.padding, dilation=self.dilation,
                kernel_size=self.kernel_size if self.kernel_size is not None else (x.size(-2), x.size(-1)),
                return_indices=False, ceil_mode=self.ceil_mode
            )
        else:
            pooling = nn.MaxPool3d(
                stride=self.stride, padding=self.padding, dilation=self.dilation,
                kernel_size=self.kernel_size if self.kernel_size is not None else (x.size(-3), x.size(-2), x.size(-1)),
                return_indices=False, ceil_mode=self.ceil_mode
            )
        x = pooling(x)
        return x.squeeze(dim=-1)  # [N,C,1] -> [N,C] 
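The forward above builds the pooling layer on the fly: when kernel_size is left as None, the full temporal axis is used, turning the layer into global max pooling over [N, L, C] input. A standalone sketch of that 1D path, with made-up sizes:

import torch
import torch.nn as nn

x = torch.randn(8, 40, 64)                        # [N, L, C]
x = torch.transpose(x, 1, 2)                      # [N, C, L]
pooled = nn.MaxPool1d(kernel_size=x.size(-1))(x)  # [N, C, 1]
print(pooled.squeeze(dim=-1).shape)               # torch.Size([8, 64]) -> [N, C]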
Example #15
Source File: arci.py    From MatchZoo-py with Apache License 2.0    6 votes
def _make_conv_pool_block(
        cls,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        activation: nn.Module,
        pool_size: int,
    ) -> nn.Module:
        """Make conv pool block."""
        return nn.Sequential(
            nn.ConstantPad1d((0, kernel_size - 1), 0),
            nn.Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size
            ),
            activation,
            nn.MaxPool1d(kernel_size=pool_size)
        ) 
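The block above right-pads by kernel_size - 1 so the convolution preserves the sequence length, and only the final MaxPool1d shortens it. A quick sketch with assumed channel counts and an assumed pool size of 2:

import torch
import torch.nn as nn

block = nn.Sequential(
    nn.ConstantPad1d((0, 3 - 1), 0),                   # pad the right so the conv keeps length 20
    nn.Conv1d(in_channels=8, out_channels=16, kernel_size=3),
    nn.ReLU(),
    nn.MaxPool1d(kernel_size=2),                       # 20 -> 10
)
print(block(torch.randn(4, 8, 20)).shape)              # torch.Size([4, 16, 10])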
Example #16
Source File: soundnet.py    From pretorched-x with MIT License    6 votes
def __init__(self, num_classes=1000, feature_dim=262000):
        super().__init__()
        self.num_classes = num_classes
        self.feature_dim = feature_dim
        self.conv1 = nn.Conv1d(1, 16, 64, stride=2, padding=32)
        self.pool1 = nn.MaxPool1d(8, stride=1, padding=0)
        self.conv2 = nn.Conv1d(16, 32, 32, stride=2, padding=16)
        self.pool2 = nn.MaxPool1d(8, stride=1, padding=0)
        self.conv3 = nn.Conv1d(32, 64, 16, stride=2, padding=8)
        self.conv4 = nn.Conv1d(64, 128, 8, stride=2, padding=4)
        self.conv5 = nn.Conv1d(128, 256, 4, stride=2, padding=2)
        self.pool5 = nn.MaxPool1d(4, stride=1, padding=0)
        self.conv6 = nn.Conv1d(256, 512, 4, stride=2, padding=2)
        self.conv7 = nn.Conv1d(512, 1024, 4, stride=2, padding=2)
        self.conv8 = nn.Conv1d(1024, 1000, 8, stride=2, padding=0)
        self.last_linear = nn.Linear(feature_dim, num_classes)
        self.flatten = Flatten()
        self.fdim = feature_dim 
Example #17
Source File: soundnet.py    From pretorched-x with MIT License    6 votes
def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv1d(1, 16, 64, stride=2, padding=32)
        self.pool1 = nn.MaxPool1d(8, stride=1, padding=0)
        self.conv2 = nn.Conv1d(16, 32, 32, stride=2, padding=16)
        self.pool2 = nn.MaxPool1d(8, stride=1, padding=0)
        self.conv3 = nn.Conv1d(32, 64, 16, stride=2, padding=8)
        self.conv4 = nn.Conv1d(64, 128, 8, stride=2, padding=4)
        self.conv5 = nn.Conv1d(128, 256, 4, stride=2, padding=2)
        self.pool5 = nn.MaxPool1d(4, stride=1, padding=0)
        self.conv6 = nn.Conv1d(256, 512, 4, stride=2, padding=2)
        self.conv7 = nn.Conv1d(512, 1024, 4, stride=2, padding=2)
        self.conv8_1 = nn.Conv1d(1024, 1000, 8, stride=2, padding=0)
        self.conv8_2 = nn.Conv1d(1024, 401, 8, stride=2, padding=0)
        self.last_linear1 = nn.Linear(262000, 1000)
        self.last_linear2 = nn.Linear(105062, 365)
        self.flatten = Flatten() 
Example #18
Source File: models.py    From AMNRE with MIT License    6 votes
def __init__(self,
               vocab_size,emb,emb_dim=dimWE,
               hidden_dim=dimC,lang=0):
        #emb---np wordVec vocab_size=len(emb)
        super(EncoderCNN,self).__init__()
        self.lang=lang
        self.word_emb=nn.Embedding(vocab_size,emb_dim)
        self.word_emb.weight.data.copy_(torch.from_numpy(emb))
        self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
        self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
        self.maxPooling=nn.MaxPool1d(SenLen[self.lang]-2)
        self.emb_dim=emb_dim+dimWPE*2
        self.hidden_dim=hidden_dim
        #using CNN
        self.tanh=nn.Tanh()
        self.conv=nn.Conv1d(self.emb_dim,hidden_dim,filter_size)
        self.dropout=nn.Dropout(p=CNNDropout) 
Example #19
Source File: model.py    From PJ_NLP with Apache License 2.0    5 votes
def __init__(self, vocab_size, emb_mat):
        kernel_size = 3
        filters = 128
        super(Cnn, self).__init__()
        self.loss_fn = nn.MultiLabelSoftMarginLoss()
        self.encoder = nn.Embedding(vocab_size, conf.emb_size)
        self.title_conv = nn.Sequential(
            nn.Conv1d(conf.emb_size, filters, kernel_size=kernel_size),
            nn.BatchNorm1d(filters),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=(conf.title_seq_len - 2 * kernel_size + 2))
        )
        self.content_conv = nn.Sequential(
            nn.Conv1d(conf.emb_size, filters, kernel_size=3),
            nn.BatchNorm1d(filters),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=(conf.content_seq_len - 2 * kernel_size + 2))
        )
        self.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear((filters + filters), 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
            nn.Linear(1024, conf.n_classes),
        )

        self.encoder.weight.data.copy_(emb_mat) 
Example #20
Source File: MAXPOOL.py    From Kernel-Based-Neural-Ranking-Models with MIT License    5 votes
def max_score(self, inputs_d, inputs_q, mask_d, mask_q):
        q_embed = self.word_emb(inputs_q)
        d_embed = self.word_emb(inputs_d)
        
        q_embed_norm = F.normalize(q_embed, 2, 2)
        d_embed_norm = F.normalize(d_embed, 2, 2)

        mask_d = mask_d.view(mask_d.size()[0], mask_d.size()[1], 1)
        mask_q = mask_q.view(mask_q.size()[0], mask_q.size()[1], 1)

        q_embed_norm = q_embed_norm * mask_q
        d_embed_norm = d_embed_norm * mask_d

        q_embed_norm = q_embed_norm.permute(0, 2, 1)
        d_embed_norm = d_embed_norm.permute(0, 2, 1)
        
        maxop_q = nn.MaxPool1d(q_embed_norm.shape[2])
        maxq = maxop_q(q_embed_norm).squeeze()
        
        maxop_d = nn.MaxPool1d(d_embed_norm.shape[2])
        maxd = maxop_d(d_embed_norm).squeeze()
        
        pdist = nn.CosineSimilarity()

        output = pdist(maxq, maxd).unsqueeze(1)

        return output 
Example #21
Source File: model.py    From PJ_NLP with Apache License 2.0    5 votes
def __init__(self, vocab_size, emb_mat):
        kernel_sizes = [1, 2, 3, 4]
        super(TextCNN, self).__init__()
        self.encoder = nn.Embedding(vocab_size, conf.emb_size)
        self.loss_fn = nn.MultiLabelSoftMarginLoss()

        title_convs = [nn.Sequential(
            nn.Conv1d(in_channels=conf.emb_size, out_channels=conf.emb_hidden_size, kernel_size=kernel_size),
            nn.BatchNorm1d(conf.emb_hidden_size),
            nn.ReLU(inplace=True),

            nn.Conv1d(in_channels=conf.emb_hidden_size, out_channels=conf.emb_hidden_size, kernel_size=kernel_size),
            nn.BatchNorm1d(conf.emb_hidden_size),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=(conf.title_seq_len - kernel_size * 2 + 2))
        )
            for kernel_size in kernel_sizes]

        content_convs = [nn.Sequential(
            nn.Conv1d(in_channels=conf.emb_size, out_channels=conf.emb_hidden_size, kernel_size=kernel_size),
            nn.BatchNorm1d(conf.emb_hidden_size),
            nn.ReLU(inplace=True),

            nn.Conv1d(in_channels=conf.emb_hidden_size, out_channels=conf.emb_hidden_size, kernel_size=kernel_size),
            nn.BatchNorm1d(conf.emb_hidden_size),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=(conf.content_seq_len - kernel_size * 2 + 2))
        )
            for kernel_size in kernel_sizes]

        self.title_convs = nn.ModuleList(title_convs)
        self.content_convs = nn.ModuleList(content_convs)

        self.fc = nn.Sequential(
            nn.Linear(len(kernel_sizes) * (conf.emb_hidden_size + conf.emb_hidden_size), conf.liner_size),
            nn.BatchNorm1d(conf.liner_size),
            nn.ReLU(inplace=True),
            nn.Linear(conf.liner_size, conf.n_classes)
        )

        self.encoder.weight.data.copy_(emb_mat) 
Example #22
Source File: test_numerical.py    From coremltools with BSD 3-Clause "New" or "Revised" License    5 votes
def test_max_pool1d(self, input_shape, kernel_size, stride, pad):
        if pad > kernel_size / 2:
            # Because this test is xfail, we have to fail rather than
            # just return here, otherwise these test cases unexpectedly pass.
            # This can be changed to `return` once the above radar
            # is fixed and the test is no longer xfail.
            raise ValueError("pad must be less than half the kernel size")
        model = nn.MaxPool1d(kernel_size, stride, pad, ceil_mode=False)
        run_numerical_test(input_shape, model) 
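The guard in this test reflects a PyTorch constraint: nn.MaxPool1d requires padding to be at most half the kernel size, so larger pads are rejected before the model is even built. A small sketch of a valid configuration and the resulting output length, with illustrative sizes:

import torch
import torch.nn as nn

# padding=1 satisfies padding <= kernel_size // 2 for kernel_size=3
pool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1, ceil_mode=False)
x = torch.randn(2, 4, 10)
print(pool(x).shape)   # floor((10 + 2*1 - 3) / 2) + 1 = 5 -> torch.Size([2, 4, 5])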
Example #23
Source File: factories.py    From MONAI with Apache License 2.0    5 votes
def maxpooling_factory(dim):
    types = [nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]
    return types[dim - 1] 
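A hypothetical call site for the factory above (not from the MONAI source): the spatial dimension selects the pooling class, which is then instantiated like any other module.

import torch

# Uses maxpooling_factory as defined above; dim and kernel size are illustrative.
pool_cls = maxpooling_factory(1)          # dim=1 -> nn.MaxPool1d
pool = pool_cls(kernel_size=2)            # same as nn.MaxPool1d(2)
print(pool(torch.randn(2, 3, 8)).shape)   # torch.Size([2, 3, 4])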
Example #24
Source File: encoder.py    From HATT-Proto with MIT License    5 votes
def __init__(self, max_length, word_embedding_dim=50, pos_embedding_dim=5, hidden_size=230):
        nn.Module.__init__(self)

        self.max_length = max_length
        self.hidden_size = hidden_size
        self.embedding_dim = word_embedding_dim + pos_embedding_dim * 2
        self.conv = nn.Conv1d(self.embedding_dim, self.hidden_size, 3, padding=1)
        self.pool = nn.MaxPool1d(max_length)

        # For PCNN
        self.mask_embedding = nn.Embedding(4, 3)
        self.mask_embedding.weight.data.copy_(torch.FloatTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]))
        self.mask_embedding.weight.requires_grad = False
        self._minus = -100 
Example #25
Source File: models.py    From Adv-ED with MIT License    5 votes
def __init__(self,config):
        super(DMBERT_Encoder,self).__init__(config)
        self.bert=BertModel(config)
        self.dropout=nn.Dropout(p=keepProb)
        #self.M=nn.Linear(EncodedDim,dimE)
        self.maxpooling=nn.MaxPool1d(SenLen) 
Example #26
Source File: models.py    From Adv-ED with MIT License    5 votes
def __init__(self):
        super(DMCNN_Encoder,self).__init__()
        self.word_emb=nn.Embedding(len(wordVec),dimWE,padding_idx=0)
        #self.word_emb=nn.Embedding.from_pretrained(torch.FloatTensor(wordVec),freeze=False,padding_idx=0)
        weight=torch.tensor(wordVec)
        weight.requires_grad_(True)
        self.word_emb.weight.data.copy_(weight)
        #self.word_emb.weight.requires_grad_(True)
        #print(self.word_emb.weight.data[0])
        self.pos_emb=nn.Embedding(MaxPos,dimPE)
        self.conv=nn.Conv1d(dimWE+dimPE,dimC,filter_size,padding=1)
        self.dropout=nn.Dropout(p=keepProb)
        #self.M=nn.Linear(EncodedDim,dimE)
        self.maxpooling=nn.MaxPool1d(SenLen) 
Example #27
Source File: cnn_context_classifier.py    From l2w with GNU General Public License v3.0    5 votes
def forward(self, context, endings, itos=None):
        ends = endings[0]
        ends_ls = endings[1]
        cont_seq_len, batch_size = context.size()

        end_seq_len = ends.size()[0]
        end = ends.view(end_seq_len, -1)
        end_batch_size = end.size()[1]
        decode_mode = (batch_size == 1 and end_batch_size > 1)
        if not decode_mode:
            assert batch_size == end_batch_size

        maxpool = nn.MaxPool1d(cont_seq_len) # define layer for context length

        context_convol = self.context_conv(self.embed_seq(context))
        context_pooled = maxpool(context_convol).view(batch_size, self.embedding_dim)
         
        maxpool_end = nn.MaxPool1d(end_seq_len)
        end_conv = F.relu(self.ending_conv(self.embed_seq(end)))
        end_pooled = maxpool_end(end_conv).view(end_batch_size, self.embedding_dim)

        if decode_mode:
            context_pooled = context_pooled.expand(end_batch_size, self.embedding_dim).contiguous()
        pooled = context_pooled * end_pooled

        dropped = self.drop(pooled)
        final = self.fc(dropped).view(-1)
        return final 
Example #28
Source File: models.py    From AMNRE with MIT License    5 votes
def __init__(self,
               vocab_size,emb,emb_dim=dimWE,
               hidden_dim=dimC):
        #emb---np wordVec vocab_size=len(emb)
        super(EncoderCNN,self).__init__()
        self.word_emb=nn.Embedding(vocab_size,emb_dim)
        self.word_emb.weight.data.copy_(torch.from_numpy(emb))
        self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
        self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
        self.maxPooling=nn.MaxPool1d(SenLen-2)
        self.emb_dim=emb_dim+dimWPE*2
        self.hidden_dim=hidden_dim
        #using CNN
        self.tanh=nn.Tanh()
        self.conv=nn.Conv1d(self.emb_dim,hidden_dim,filter_size) 
Example #29
Source File: cnn_encoder.py    From OpenNRE with MIT License    5 votes
def __init__(self, 
                 token2id, 
                 max_length=128, 
                 hidden_size=230, 
                 word_size=50,
                 position_size=5,
                 blank_padding=True,
                 word2vec=None,
                 kernel_size=3, 
                 padding_size=1,
                 dropout=0,
                 activation_function=F.relu,
                 mask_entity=False):
        """
        Args:
            token2id: dictionary of token->idx mapping
            max_length: max length of sentence, used for position embedding
            hidden_size: hidden size
            word_size: size of word embedding
            position_size: size of position embedding
            blank_padding: padding for CNN
            word2vec: pretrained word2vec numpy
            kernel_size: kernel size for CNN
            padding_size: padding size for CNN
        """
        # Hyperparameters
        super(CNNEncoder, self).__init__(token2id, max_length, hidden_size, word_size, position_size, blank_padding, word2vec, mask_entity=mask_entity)
        self.drop = nn.Dropout(dropout)
        self.kernel_size = kernel_size
        self.padding_size = padding_size
        self.act = activation_function

        self.conv = nn.Conv1d(self.input_size, self.hidden_size, self.kernel_size, padding=self.padding_size)
        self.pool = nn.MaxPool1d(self.max_length) 
Example #30
Source File: cnn_1d.py    From UDTL with MIT License    5 votes
def __init__(self, pretrained=False, in_channel=1, out_channel=10):
        super(CNN, self).__init__()
        if pretrained == True:
            warnings.warn("Pretrained model is not available")

        self.layer1 = nn.Sequential(
            nn.Conv1d(in_channel, 16, kernel_size=15),  # 16, 26 ,26
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True))


        self.layer2 = nn.Sequential(
            nn.Conv1d(16, 32, kernel_size=3),  # 32, 24, 24
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
            )  # 32, 12,12     (24-2) /2 +1

        self.layer3 = nn.Sequential(
            nn.Conv1d(32, 64, kernel_size=3),  # 64,10,10
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True))

        self.layer4 = nn.Sequential(
            nn.Conv1d(64, 128, kernel_size=3),  # 128,8,8
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.AdaptiveMaxPool1d(4))  # 128, 4,4

        self.layer5 = nn.Sequential(
            nn.Linear(128 * 4, 256),
            nn.ReLU(inplace=True),
            nn.Dropout())