Python torch.nn.AdaptiveAvgPool1d() Examples

The following are 30 code examples of torch.nn.AdaptiveAvgPool1d(). Each example is taken from an open-source project; its source file and license are listed above the snippet. You may also want to check out the other functions and classes available in the torch.nn module.
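As a quick orientation before the project examples: nn.AdaptiveAvgPool1d(output_size) averages over the last (length) dimension so that the output always has the requested length, whatever the input length. A minimal, self-contained sketch:

import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool1d(1)        # global average pooling over the length axis
x = torch.randn(4, 64, 100)           # (batch, channels, length)
print(pool(x).shape)                  # torch.Size([4, 64, 1])

pool5 = nn.AdaptiveAvgPool1d(5)       # or pool to a fixed output length of 5
print(pool5(torch.randn(4, 64, 37)).shape)  # torch.Size([4, 64, 5])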
Example #1
Source File: resnet.py    From ecg_pytorch with Apache License 2.0
def __init__(self, block, layers, num_classes=55):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv1d(8, 64, kernel_size=15, stride=2, padding=7,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.out_channels  # fan-out for a Conv1d: kernel_size * out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #2
Source File: jasper.py    From NeMo with Apache License 2.0
def __init__(
        self, *, feat_in, num_classes, init_mode="xavier_uniform", return_logits=True, pooling_type='avg', **kwargs
    ):
        TrainableNM.__init__(self, **kwargs)

        self._feat_in = feat_in
        self._return_logits = return_logits
        self._num_classes = num_classes

        if pooling_type == 'avg':
            self.pooling = nn.AdaptiveAvgPool1d(1)
        elif pooling_type == 'max':
            self.pooling = nn.AdaptiveMaxPool1d(1)
        else:
            raise ValueError('Pooling type chosen is not valid. Must be either `avg` or `max`')

        self.decoder_layers = nn.Sequential(nn.Linear(self._feat_in, self._num_classes, bias=True))
        self.apply(lambda x: init_weights(x, mode=init_mode))
        self.to(self._device) 
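This example only defines the decoder; a hedged sketch of how such a module is typically applied to an encoder output of shape (batch, feat_in, time) is shown below (an illustration, not verbatim NeMo code):

# encoder_output: (batch, feat_in, time)
pooled = self.pooling(encoder_output)             # (batch, feat_in, 1)
logits = self.decoder_layers(pooled.squeeze(-1))  # (batch, num_classes)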
Example #3
Source File: timm.py    From mlcomp with Apache License 2.0
def __init__(self, variant, num_classes, pretrained=True, activation=None):
        super().__init__()

        model = timm.create_model(
            variant, pretrained=pretrained,
            num_classes=num_classes)

        self.model = model
        # self.model.fc = nn.Sequential(
        #     LambdaLayer(lambda x: x.unsqueeze_(0)),
        #     nn.AdaptiveAvgPool1d(self.model.fc.in_features),
        #     LambdaLayer(lambda x: x.squeeze_(0).view(x.size(0), -1)),
        #     self.model.fc
        # )

        if callable(activation) or activation is None:
            self.activation = activation
        elif activation == 'softmax':
            self.activation = nn.Softmax(dim=1)
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        else:
            raise ValueError(
                'Activation should be "sigmoid"/"softmax"/callable/None') 
Example #4
Source File: jasper.py    From NeMo with Apache License 2.0
def __init__(
        self,
        channels: int,
        reduction_ratio: int,
        context_window: int = -1,
        interpolation_mode: str = 'nearest',
        activation: Optional[Callable] = None,
    ):
        """
        Squeeze-and-Excitation sub-module.

        Args:
            channels: Input number of channels.
            reduction_ratio: Reduction ratio for "squeeze" layer.
            context_window: Integer number of timesteps that the context
                should be computed over, using stride 1 average pooling.
                If value < 1, then global context is computed.
            interpolation_mode: Interpolation mode of timestep dimension.
                Used only if context window is > 1.
                The modes available for resizing are: `nearest`, `linear` (3D-only),
                `bilinear`, `area`
            activation: Intermediate activation function used. Must be a
                callable activation function.
        """
        super(SqueezeExcite, self).__init__()
        self.context_window = int(context_window)
        self.interpolation_mode = interpolation_mode

        if self.context_window <= 0:
            self.pool = nn.AdaptiveAvgPool1d(1)  # context window = T
        else:
            self.pool = nn.AvgPool1d(self.context_window, stride=1)

        if activation is None:
            activation = nn.ReLU(inplace=True)

        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction_ratio, bias=False),
            activation,
            nn.Linear(channels // reduction_ratio, channels, bias=False),
        ) 
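Only the constructor is shown above. For the global-context case (context_window < 1), a squeeze-and-excitation forward pass typically pools over time, computes channel-wise gates with the fc block, and rescales the input. The sketch below illustrates that pattern under those assumptions; it is not the exact NeMo implementation:

# x: (batch, channels, time)
y = self.pool(x)                       # squeeze: (batch, channels, 1)
y = self.fc(y.transpose(1, -1))        # excite:  (batch, 1, channels)
y = torch.sigmoid(y).transpose(1, -1)  # gates:   (batch, channels, 1)
out = x * y                            # rescale the input channel-wise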
Example #5
Source File: simple.py    From elektronn3 with MIT License
def __init__(self, in_channels, n_classes, dropout_rate=0.05, act='relu'):
        super().__init__()
        if act == 'relu':
            act = nn.ReLU()
        elif act == 'leaky_relu':
            act = nn.LeakyReLU()
        self.seq = nn.Sequential(
            Conv3DLayer(in_channels, 20, (1, 5, 5), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(20, 30, (1, 5, 5), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(30, 40, (1, 4, 4), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(40, 50, (1, 4, 4), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(50, 60, (1, 2, 2), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(60, 70, (1, 1, 1), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(70, 70, (1, 1, 1), pooling=(1, 1, 1),
                        dropout_rate=dropout_rate, act=act),
        )
        self.adaptavgpool = nn.AdaptiveAvgPool1d(100)
        self.fc = nn.Sequential(
            nn.Linear(100, 50),
            act,
            nn.Linear(50, 30),
            act,
            nn.Linear(30, n_classes),
        ) 
Example #6
Source File: simple.py    From elektronn3 with MIT License
def forward(self, x):
        x = self.seq(x)
        x = x.view(x.size()[0], 1, -1)  # AdaptiveAvgPool1d requires input of shape B C D
        x = self.adaptavgpool(x)
        x = self.fc(x.squeeze(1))  # remove auxiliary axis -> B C with C = n_classes
        return x 
Example #7
Source File: simple.py    From elektronn3 with MIT License
def __init__(self, in_channels, n_classes, dropout_rate=0.05, act='relu',
                 n_scalar=1):
        super().__init__()
        if act == 'relu':
            act = nn.ReLU()
        elif act == 'leaky_relu':
            act = nn.LeakyReLU()
        self.seq = nn.Sequential(
            Conv3DLayer(in_channels, 20, (1, 5, 5), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(20, 30, (1, 5, 5), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(30, 40, (1, 4, 4), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(40, 50, (1, 4, 4), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(50, 60, (1, 2, 2), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(60, 70, (1, 1, 1), pooling=(1, 2, 2),
                        dropout_rate=dropout_rate, act=act),
            Conv3DLayer(70, 70, (1, 1, 1), pooling=(1, 1, 1),
                        dropout_rate=dropout_rate, act=act),
        )
        self.adaptavgpool = nn.AdaptiveAvgPool1d(100)
        self.fc = nn.Sequential(
            nn.Linear(100 + n_scalar, 50),
            act,
            nn.Linear(50, 30),
            act,
            nn.Linear(30, n_classes),
        ) 
Example #8
Source File: simple.py    From elektronn3 with MIT License
def forward(self, x, scal):
        x = self.seq(x)
        x = x.view(x.size()[0], 1, -1)  # AdaptiveAvgPool1d requires input of shape B C D
        x = self.adaptavgpool(x).squeeze(1)
        x = torch.cat((x, scal), 1)
        x = self.fc(x)  # remove auxiliary axis -> B C with C = n_classes
        return x 
Example #9
Source File: adding_model.py    From tape with BSD 3-Clause "New" or "Revised" License
def __init__(self, config: SimpleConvConfig):
        super().__init__(config)
        self.embedding = nn.Embedding(config.vocab_size, config.filter_size)
        self.encoder = nn.Sequential(
            *[nn.Conv1d(config.filter_size, config.filter_size, config.kernel_size,
                        padding=config.kernel_size // 2)
              for _ in range(config.num_layers)])

        self.pooler = nn.AdaptiveAvgPool1d(1) 
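The constructor above defines an embedding, a stack of Conv1d layers, and a pooler; a plausible way to wire them together in a forward pass (a hypothetical sketch, not taken from the tape source) is:

# tokens: (batch, seq_len) integer ids
x = self.embedding(tokens)            # (batch, seq_len, filter_size)
x = x.transpose(1, 2)                 # (batch, filter_size, seq_len) -- Conv1d expects channels first
x = self.encoder(x)                   # (batch, filter_size, seq_len)
pooled = self.pooler(x).squeeze(-1)   # (batch, filter_size), independent of seq_len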
Example #10
Source File: efficientnet.py    From mlcomp with Apache License 2.0
def __init__(self, variant, num_classes, pretrained=True, activation=None):
        super().__init__()
        if 'efficientnet' not in variant:
            variant = f'efficientnet-{variant}'

        if pretrained:
            model = _EfficientNet.from_pretrained(variant,
                                                  num_classes=num_classes)
        else:
            model = _EfficientNet.from_name(variant, {
                'num_classes': num_classes
            })
        self.model = model

        self.model._fc = nn.Sequential(
            LambdaLayer(lambda x: x.unsqueeze_(0)),
            nn.AdaptiveAvgPool1d(self.model._fc.in_features),
            LambdaLayer(lambda x: x.squeeze_(0).view(x.size(0), -1)),
            self.model._fc
        )

        if callable(activation) or activation is None:
            self.activation = activation
        elif activation == 'softmax':
            self.activation = nn.Softmax(dim=1)
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        else:
            raise ValueError(
                'Activation should be "sigmoid"/"softmax"/callable/None') 
Example #11
Source File: pretrained.py    From mlcomp with Apache License 2.0
def __init__(self, variant, num_classes, pretrained=True, activation=None):
        super().__init__()
        params = {'num_classes': 1000}
        if not pretrained:
            params['pretrained'] = None

        model = pretrainedmodels.__dict__[variant](**params)
        self.model = model
        linear = self.model.last_linear

        if isinstance(linear, nn.Linear):
            self.model.last_linear = nn.Linear(
                model.last_linear.in_features,
                num_classes
            )
            self.model.last_linear.in_channels = linear.in_features
        elif isinstance(linear, nn.Conv2d):
            self.model.last_linear = nn.Conv2d(
                linear.in_channels,
                num_classes,
                kernel_size=linear.kernel_size,
                bias=True
            )
            self.model.last_linear.in_features = linear.in_channels

        self.model.last_linear = nn.Sequential(
            LambdaLayer(lambda x: x.unsqueeze_(0)),
            nn.AdaptiveAvgPool1d(self.model.last_linear.in_channels),
            LambdaLayer(lambda x: x.squeeze_(0).view(x.size(0), -1)),
            self.model.last_linear
        )

        if callable(activation) or activation is None:
            self.activation = activation
        elif activation == 'softmax':
            self.activation = nn.Softmax(dim=1)
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        else:
            raise ValueError(
                'Activation should be "sigmoid"/"softmax"/callable/None') 
Example #12
Source File: multi_scale_resnet_1d.py    From candock with MIT License
def __init__(self, kernel_size):
        super(Route, self).__init__()
        self.block1 = ResidualBlock(64, 64, kernel_size, stride=1)
        self.block2 = ResidualBlock(64, 128, kernel_size)
        self.block3 = ResidualBlock(128, 256, kernel_size)
        self.block4 = ResidualBlock(256, 512, kernel_size)
        self.avgpool = nn.AdaptiveAvgPool1d(1) 
Example #13
Source File: flops_counter.py    From ESNet with MIT License
def is_supported_instance(module):
    if isinstance(module, (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d,
                           torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU, \
                           torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear, \
                           torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d, \
                           torch.nn.Upsample, nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d, \
                           torch.nn.MaxPool1d, torch.nn.AvgPool1d, torch.nn.BatchNorm1d, \
                           nn.AdaptiveMaxPool1d, nn.AdaptiveAvgPool1d, \
                           nn.ConvTranspose2d, torch.nn.BatchNorm3d,
                           torch.nn.MaxPool3d, torch.nn.AvgPool3d, nn.AdaptiveMaxPool3d, nn.AdaptiveAvgPool3d)):
        return True

    return False 
Example #14
Source File: flops_counter.py    From ESNet with MIT License
def add_flops_counter_hook_function(module):
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            return

        if isinstance(module, (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)):
            handle = module.register_forward_hook(conv_flops_counter_hook)
        elif isinstance(module, (torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU, \
                                 torch.nn.LeakyReLU, torch.nn.ReLU6)):
            handle = module.register_forward_hook(relu_flops_counter_hook)
        elif isinstance(module, torch.nn.Linear):
            handle = module.register_forward_hook(linear_flops_counter_hook)
        elif isinstance(module, (torch.nn.AvgPool2d, torch.nn.MaxPool2d, nn.AdaptiveMaxPool2d, \
                                 nn.AdaptiveAvgPool2d, torch.nn.MaxPool3d, torch.nn.AvgPool3d, \
                                 torch.nn.AvgPool1d, torch.nn.MaxPool1d, nn.AdaptiveMaxPool1d, \
                                 nn.AdaptiveAvgPool1d, nn.AdaptiveMaxPool3d, nn.AdaptiveAvgPool3d)):
            handle = module.register_forward_hook(pool_flops_counter_hook)
        elif isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
            handle = module.register_forward_hook(bn_flops_counter_hook)
        elif isinstance(module, torch.nn.Upsample):
            handle = module.register_forward_hook(upsample_flops_counter_hook)
        elif isinstance(module, torch.nn.ConvTranspose2d):
            handle = module.register_forward_hook(deconv_flops_counter_hook)
        else:
            handle = module.register_forward_hook(empty_flops_counter_hook)
        module.__flops_handle__ = handle 
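The hook functions registered above are defined elsewhere in the project. As a rough illustration of the idea, a pooling hook can approximate cost as one operation per input element; the sketch below is an assumption for illustration, not the ESNet implementation:

import numpy as np

def pool_flops_counter_hook(module, input, output):
    # Approximate pooling cost: one op per element of the (single) input tensor.
    module.__flops__ += int(np.prod(input[0].shape))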
Example #15
Source File: utils.py    From WaveUNet with MIT License
def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
                nn.Linear(channel, channel // reduction),
                nn.ReLU(inplace=True),
                nn.Linear(channel // reduction, channel),
                nn.Sigmoid()
        ) 
Example #16
Source File: slowfast_my.py    From GAN_Review with MIT License
def FastPath(self, input):
        lateral = []
        x = self.fast_conv1(input)
        x = self.fast_bn1(x)
        x = self.fast_relu(x)
        pool1 = self.fast_maxpool(x)
        lateral_p = self.lateral_p1(pool1)
        lateral.append(lateral_p)

        res2 = self.fast_res2(pool1)
        lateral_res2 = self.lateral_res2(res2)
        lateral.append(lateral_res2)
        
        res3 = self.fast_res3(res2)
        lateral_res3 = self.lateral_res3(res3)
        lateral.append(lateral_res3)

        res4 = self.fast_res4(res3)
        lateral_res4 = self.lateral_res4(res4)
        lateral.append(lateral_res4)

        x = self.fast_res5(res4)
        b, c, t, h, w = x.size()
        x = x.permute(0, 1, 3, 4, 2).contiguous().view(b, c*h*w, t)
        x = nn.AdaptiveAvgPool1d(1)(x)
        x = x.view(b, c, h, w).unsqueeze(2)
        #x = nn.AdaptiveAvgPool3d(1)(res5)
        #x = x.view(-1, x.size(1))

        return x, lateral 
Example #17
Source File: model_search.py    From sgas with MIT License
def __init__(self, C, num_classes, num_cells, criterion, steps=4, multiplier=4, stem_multiplier=3, in_channels=3):
        super(Network, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._num_cells = num_cells
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        self._in_channels = in_channels
        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            MLP([in_channels, C_curr], None, 'batch', bias=False),
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        for i in range(self._num_cells):
            cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr)
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.global_pooling = nn.AdaptiveAvgPool1d(1)
        self.classifier = nn.Linear(C_prev + 1, num_classes)

        self._initialize_alphas()

        self.normal_selected_idxs = torch.tensor(len(self.alphas_normal) * [-1], requires_grad=False, dtype=torch.int)
        self.normal_candidate_flags = torch.tensor(len(self.alphas_normal) * [True],
                                                   requires_grad=False, dtype=torch.bool) 
Example #18
Source File: micro_multi_scale_resnet_1d.py    From candock with MIT License
def __init__(self, kernel_size):
        super(Route, self).__init__()
        self.block1 = ResidualBlock(64, 64, kernel_size, stride=1)
        self.block2 = ResidualBlock(64, 128, kernel_size)
        self.block3 = ResidualBlock(128, 256, kernel_size)
        self.avgpool = nn.AdaptiveAvgPool1d(1) 
Example #19
Source File: filternet.py    From FilterNet with MIT License
def conv_layer(window, ks=3, dilation=1):
    return nn.Sequential(
        nn.Conv1d(1, 1, kernel_size=ks, bias=False, dilation=dilation),
        nn.AdaptiveAvgPool1d(window),
        nn.LeakyReLU(negative_slope=0.1, inplace=True)) 
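A quick usage sketch (hypothetical shapes): the Conv1d shortens the sequence slightly, and the adaptive pooling then forces the output back to exactly window timesteps regardless of the input length.

layer = conv_layer(window=64)
x = torch.randn(8, 1, 200)   # (batch, channels=1, length)
y = layer(x)                 # -> torch.Size([8, 1, 64])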
Example #20
Source File: resnet_1d.py    From candock with MIT License
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm1d
        self.inplanes = 64
        self.conv1 = nn.Conv1d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm1d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0) 
Example #21
Source File: cnn_1d.py    From candock with MIT License
def __init__(self, inchannel, num_classes):
        super(cnn, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv1d(inchannel, 64, 7, 1, 0, bias=False),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(64, 128, 7, 1, 0, bias=False),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(2),
        )
        self.conv3 = nn.Sequential(
            nn.Conv1d(128, 256, 7, 1, 0, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(2),
        )
        self.conv4 = nn.Sequential(
            nn.Conv1d(256, 512, 7, 1, 0, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(2),
        )
        self.conv5 = nn.Sequential(
            nn.Conv1d(512, 1024, 7, 1, 0, bias=False),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True),
        )
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.out = nn.Linear(1024, num_classes) 
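The matching forward pass is not part of this snippet; a hedged sketch of what it would look like (assumed for illustration, not the candock source):

def forward(self, x):
    x = self.conv1(x)
    x = self.conv2(x)
    x = self.conv3(x)
    x = self.conv4(x)
    x = self.conv5(x)
    x = self.avgpool(x)        # (batch, 1024, 1) for any sufficiently long input
    x = x.view(x.size(0), -1)  # (batch, 1024)
    return self.out(x)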
Example #22
Source File: vector_model.py    From starsem2018-entity-linking with Apache License 2.0
def __init__(self, parameters, word_embeddings=None):
        super(Words2VectorNet, self).__init__()
        self._p = parameters
        self._dropout = nn.Dropout(p=self._p.get('dropout', 0.1))
        self._word_embedding = nn.Embedding(self._p['word.vocab.size'], self._p['word.emb.size'], padding_idx=0)
        if word_embeddings is not None:
            word_embeddings = torch.from_numpy(word_embeddings).float()
            self._word_embedding.weight = nn.Parameter(word_embeddings)
        self._word_embedding.weight.requires_grad = False

        self._pos_embedding = nn.Embedding(3, self._p['poss.emb.size'], padding_idx=0)

        self._word_encoding_conv = nn.Conv1d(self._p['word.emb.size'] + self._p['poss.emb.size'],
                                             self._p['word.conv.size'],
                                             self._p['word.conv.width'],
                                             padding=self._p['word.conv.width']//2)

        self._nonlinearity = nn.ReLU() if self._p.get('enc.activation', 'tanh') == 'relu' else nn.Tanh()
        self._convs = nn.ModuleList([
            nn.Sequential(nn.Conv1d(in_channels=self._p['word.conv.size'],
                                    out_channels=self._p['word.conv.size'],
                                    kernel_size=self._p['word.conv.width'],
                                    padding=self._p['char.conv.width']//2 * 2**(j + 1) if not self._p.get("legacy.mode", False) else self._p['char.conv.width']//2 + 2**(j + 1),
                                    dilation=2**(j + 1),
                                    bias=True),
                          self._nonlinearity
                          )
            for j in range(self._p.get('word.conv.depth', 1))
        ])
        self._block_conv = nn.Conv1d(self._p['word.conv.size'],
                                     self._p['word.conv.size'],
                                     self._p['word.conv.width'],
                                     padding=self._p['word.conv.width']//2)

        self.sem_layers = nn.Sequential(
            self._dropout,
            nn.Linear(self._p['word.conv.size'], self._p['word.enc.size']),
            self._nonlinearity,
        )

        self._pool = nn.AdaptiveMaxPool1d(1) if self._p.get('enc.pooling', 'max') == 'max' else nn.AdaptiveAvgPool1d(1) 
Example #23
Source File: flops_counter.py    From Efficient-Segmentation-Networks with MIT License
def add_flops_counter_hook_function(module):
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            return

        if isinstance(module, (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)):
            handle = module.register_forward_hook(conv_flops_counter_hook)
        elif isinstance(module, (torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU, \
                                 torch.nn.LeakyReLU, torch.nn.ReLU6)):
            handle = module.register_forward_hook(relu_flops_counter_hook)
        elif isinstance(module, torch.nn.Linear):
            handle = module.register_forward_hook(linear_flops_counter_hook)
        elif isinstance(module, (torch.nn.AvgPool2d, torch.nn.MaxPool2d, nn.AdaptiveMaxPool2d, \
                                 nn.AdaptiveAvgPool2d, torch.nn.MaxPool3d, torch.nn.AvgPool3d, \
                                 torch.nn.AvgPool1d, torch.nn.MaxPool1d, nn.AdaptiveMaxPool1d, \
                                 nn.AdaptiveAvgPool1d, nn.AdaptiveMaxPool3d, nn.AdaptiveAvgPool3d)):
            handle = module.register_forward_hook(pool_flops_counter_hook)
        elif isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
            handle = module.register_forward_hook(bn_flops_counter_hook)
        elif isinstance(module, torch.nn.Upsample):
            handle = module.register_forward_hook(upsample_flops_counter_hook)
        elif isinstance(module, torch.nn.ConvTranspose2d):
            handle = module.register_forward_hook(deconv_flops_counter_hook)
        else:
            handle = module.register_forward_hook(empty_flops_counter_hook)
        module.__flops_handle__ = handle 
Example #24
Source File: flops_counter.py    From Efficient-Segmentation-Networks with MIT License
def is_supported_instance(module):
    if isinstance(module, (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d,
                           torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU, \
                           torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear, \
                           torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d, \
                           torch.nn.Upsample, nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d, \
                           torch.nn.MaxPool1d, torch.nn.AvgPool1d, torch.nn.BatchNorm1d, \
                           nn.AdaptiveMaxPool1d, nn.AdaptiveAvgPool1d, \
                           nn.ConvTranspose2d, torch.nn.BatchNorm3d,
                           torch.nn.MaxPool3d, torch.nn.AvgPool3d, nn.AdaptiveMaxPool3d, nn.AdaptiveAvgPool3d)):
        return True

    return False 
Example #25
Source File: model_resnet1d.py    From ronin with GNU General Public License v3.0
def __init__(self, in_planes, num_outputs):
        super(GlobAvgOutputModule, self).__init__()
        self.avg = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(in_planes, num_outputs) 
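The forward pass is not included in this snippet; a minimal sketch (assumed, not the ronin source) pools the (batch, in_planes, length) feature map down to one timestep, flattens it, and applies the linear head:

def forward(self, x):
    x = self.avg(x)            # (batch, in_planes, 1)
    x = x.view(x.size(0), -1)  # (batch, in_planes)
    return self.fc(x)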
Example #26
Source File: compose_excitation_network.py    From torecsys with MIT License
def __init__(self, 
                 num_fields : int,
                 reduction  : int = 1,
                 activation : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        r"""Initialize ComposeExcitationNetworkLayer
        
        Args:
            num_fields (int): Number of inputs' fields. 
            reduction (int, optional): Size of reduction in dense layer. 
                Defaults to 1.
            activation (Callable[[T], T], optional): Activation function in dense layers.
                Defaults to nn.ReLU().
        
        Attributes:
            pooling (torch.nn.Module): Adaptive average pooling layer to compose tensors.
            fc (torch.nn.Sequential): Sequential of linear and activation to calculate weights of 
                attention, which the linear layers are: 
                :math:`[Linear(N^2, \frac{N^2}{reduction}), Linear(\frac{N^2}{reduction}, N^2)]`. 
        """
        # Refer to parent class
        super(ComposeExcitationNetworkLayer, self).__init__()

        # Initialize 1d pooling layer
        self.pooling = nn.AdaptiveAvgPool1d(1)
        
        # Initialize dense layers
        squared_num_fields = num_fields ** 2
        reduced_num_fields = squared_num_fields // reduction

        self.fc = nn.Sequential()
        self.fc.add_module("ReductionLinear", nn.Linear(squared_num_fields, reduced_num_fields))
        self.fc.add_module("ReductionActivation", activation)
        self.fc.add_module("AdditionLinear", nn.Linear(reduced_num_fields, squared_num_fields))
        self.fc.add_module("AdditionActivation", activation) 
Example #27
Source File: elaborated_entire_space_supervised_multi_task.py    From torecsys with MIT License
def __init__(self, 
                 num_fields  : int, 
                 layer_sizes : List[int], 
                 dropout_p   : List[float] = None, 
                 activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        r"""Initialize ElaboratedEntireSpaceSupervisedMultiTaskModel
        
        Args:
            num_fields (int): Number of inputs' fields
            layer_sizes (List[int]): Layer sizes of dense network
            dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            impress_to_click_pooling (nn.Module): Module of 1D average pooling layer for impress_to_click
            click_to_daction_pooling (nn.Module): Module of 1D average pooling layer for click_to_daction
            daction_to_buy_pooling (nn.Module): Module of 1D average pooling layer for daction_to_buy
            oaction_to_buy_pooling (nn.Module): Module of 1D average pooling layer for oaction_to_buy
            impress_to_click_deep (nn.Module): Module of dense layer.
            click_to_daction_deep (nn.Module): Module of dense layer.
            daction_to_buy_deep (nn.Module): Module of dense layer.
            oaction_to_buy_deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(ElaboratedEntireSpaceSupervisedMultiTaskModel, self).__init__()

        # Initialize pooling layers
        self.impress_to_click_pooling = nn.AdaptiveAvgPool1d(1)
        self.click_to_daction_pooling = nn.AdaptiveAvgPool1d(1)
        self.daction_to_buy_pooling = nn.AdaptiveAvgPool1d(1)
        self.oaction_to_buy_pooling = nn.AdaptiveAvgPool1d(1)

        # Initialize dense layers
        self.impress_to_click_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p, activation)
        self.click_to_daction_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p, activation)
        self.daction_to_buy_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p, activation)
        self.oaction_to_buy_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p, activation) 
Example #28
Source File: entire_space_multi_task.py    From torecsys with MIT License
def __init__(self, 
                 num_fields  : int, 
                 layer_sizes : List[int], 
                 dropout_p   : List[float] = None, 
                 activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        r"""Initialize EntireSpaceMultiTaskModel
        
        Args:
            num_fields (int): Number of inputs' fields
            layer_sizes (List[int]): Layer sizes of dense network
            dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            cvr_pooling (nn.Module): Module of 1D average pooling layer for CVR prediction.
            cvr_deep (nn.Module): Module of dense layer.
            ctr_pooling (nn.Module): Module of 1D average pooling layer for CTR prediction.
            ctr_deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(EntireSpaceMultiTaskModel, self).__init__()

        # Initialize pooling layer of CVR
        self.cvr_pooling = nn.AdaptiveAvgPool1d(1)

        # Initialize dense layer of CVR
        self.cvr_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p, activation)
        
        # Initialize pooling layer of CTR
        self.ctr_pooling = nn.AdaptiveAvgPool1d(1)

        # Initialize dense layer of CTR
        self.ctr_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p, activation) 
Example #29
Source File: Resnet1d.py    From UDTL with MIT License
def __init__(self, block, layers, in_channel=1, out_channel=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv1d(in_channel, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.layer5 = nn.Sequential(
            nn.Linear(512 * block.expansion, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(),
        )
        self.fc = nn.Linear(256, out_channel)

        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0) 
Example #30
Source File: resnet18_1d.py    From UDTL with MIT License
def __init__(self, block, layers, in_channel=1, out_channel=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv1d(in_channel, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool1d(1)


        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)