Python torch.nn.functional.dropout2d() Examples

The following are 30 code examples of torch.nn.functional.dropout2d(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
Example #1
Source File: unet_models.py    From open-solution-data-science-bowl-2018 with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net (VGG-style) forward pass: encoder chain, center block, then a
        decoder whose stages consume skip connections from matching encoders.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Output of ``self.final`` applied to the (dropped-out) last decoder map.
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)

        pool = self.pool(conv5)
        center = self.center(pool)

        # Decoder: each stage gets the previous stage concatenated with the
        # matching encoder feature map (skip connection) along channels.
        dec5 = self.dec5(torch.cat([center, conv5], 1))

        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)

        # Fix: F.dropout2d defaults to training=True; gate it on self.training
        # so inference is deterministic.  (If MC-dropout was intended, revert.)
        return self.final(F.dropout2d(dec0, p=self.dropout_2d, training=self.training))
Example #2
Source File: unet.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass over pre-built encoder outputs.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference and makes
        # predictions stochastic.  (If MC-dropout was intended, revert.)
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # Hypercolumn: concatenate all decoder stages at full resolution.
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        return self.final(dec1)
Example #3
Source File: unet_models.py    From open-solution-mapping-challenge with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass with max-pooling between encoder stages.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Output of ``self.final`` applied to the (dropped-out) last decoder map.
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))

        center = self.center(self.pool(conv5))

        # Decoder: each stage consumes the previous stage concatenated with the
        # matching encoder feature map (skip connection).
        dec5 = self.dec5(torch.cat([center, conv5], 1))

        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))

        # Fix: F.dropout2d defaults to training=True; gate it on self.training
        # so inference is deterministic.  (If MC-dropout was intended, revert.)
        return self.final(F.dropout2d(dec1, p=self.dropout_2d, training=self.training))
Example #4
Source File: deprecated.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x, d=None):
        """U-Net forward with a depth-channel excitation head.

        Args:
            x: input image batch (N, C, H, W).
            d: optional depth information forwarded to the excitation module.

        Returns:
            Output of ``self.final`` on the depth-excited decoder features.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        depth_channel_excitation = self.depth_channel_excitation(dec1, d)
        return self.final(depth_channel_excitation)
Example #5
Source File: deprecated.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass over pre-built encoder outputs.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        return self.final(dec1)
Example #6
Source File: unet_models.py    From open-solution-mapping-challenge with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net (VGG-style) forward pass without inter-stage pooling.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Output of ``self.final`` applied to the (dropped-out) last decoder map.
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)

        pool = self.pool(conv5)
        center = self.center(pool)

        # Decoder: each stage consumes the previous stage concatenated with the
        # matching encoder feature map (skip connection).
        dec5 = self.dec5(torch.cat([center, conv5], 1))

        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)

        # Fix: F.dropout2d defaults to training=True; gate it on self.training
        # so inference is deterministic.  (If MC-dropout was intended, revert.)
        return self.final(F.dropout2d(dec0, p=self.dropout_2d, training=self.training))
Example #7
Source File: components.py    From diffai with MIT License 6 votes vote down vote up
def forward(self, x, time = 0, **kargs):
        """Apply (time-scheduled) dropout to ``x`` during training; identity at eval.

        The keep/drop mask is sampled under ``no_grad`` so the mask itself is
        not part of the autograd graph; ``p`` may vary with ``time`` through
        the ``self.p`` schedule object.
        """
        if self.training:
            with torch.no_grad():
                # Dropout probability for the current time step.
                p = self.p.getVal(time = time)
                # Build an explicit mask by dropping a ones tensor; dropout2d
                # zeroes whole channels, plain dropout zeroes single elements.
                # NOTE(review): `h.ones` is a project helper -- presumably a
                # device-aware ones constructor; confirm.
                mask = (F.dropout2d if self.use_2d else F.dropout)(h.ones(x.size()),p=p, training=True) 
            if self.alpha_dropout:
                with torch.no_grad():
                    # Affine correction in the style of SELU alpha-dropout
                    # (constants look like the SELU fixed point) -- keeps the
                    # activation statistics stable after dropping; TODO confirm
                    # against the alpha-dropout reference formula.
                    keep_prob = 1 - p
                    alpha = -1.7580993408473766
                    a = math.pow(keep_prob + alpha * alpha * keep_prob * (1 - keep_prob), -0.5)
                    b = -a * alpha * (1 - keep_prob)
                    mask = mask * a
                return x * mask + b
            else:
                return x * mask
        else:
            # Evaluation: dropout disabled, pass through unchanged.
            return x 
Example #8
Source File: efficient.py    From LightNetPlusPlus with MIT License 6 votes vote down vote up
def forward(self, x):
        """Inverted-residual (MBConv-style) block.

        Expansion -> depth-wise conv -> optional squeeze-and-excitation ->
        point-wise projection, with a residual add (and train-time drop-connect
        via channel dropout) when ``self.use_residual`` is set.
        """
        identity = x

        # step 1. Expansion phase/Point-wise convolution
        out = self.expansion(x) if self.expand_ratio != 1 else x

        # step 2. Depth-wise convolution phase
        out = self.depth_wise(out)

        # step 3. Squeeze and Excitation
        if self.use_se:
            out = self.se_block(out)

        # step 4. Point-wise convolution phase
        out = self.point_wise(out)

        # step 5. Skip connection and drop connect
        if self.use_residual:
            if self.training and (self.dropout_rate is not None):
                out = F.dropout2d(input=out, p=self.dropout_rate,
                                  training=self.training, inplace=True)
            out = out + identity

        return out
Example #9
Source File: mixnetseg.py    From LightNetPlusPlus with MIT License 6 votes vote down vote up
def forward(self, x):
        """Inverted-residual (MBConv-style) block used by MixNet-Seg.

        Expansion -> depth-wise conv -> optional squeeze-and-excitation ->
        point-wise projection; adds the input back (with train-time
        drop-connect via channel dropout) when ``self.use_residual`` is set.
        """
        shortcut = x

        # step 1. Expansion phase/Point-wise convolution
        out = x if self.expand_ratio == 1 else self.expansion(x)

        # step 2. Depth-wise convolution phase
        out = self.depth_wise(out)

        # step 3. Squeeze and Excitation
        if self.use_se:
            out = self.se_block(out)

        # step 4. Point-wise convolution phase
        out = self.point_wise(out)

        # step 5. Skip connection and drop connect
        if self.use_residual:
            if self.training and (self.dropout_rate is not None):
                out = F.dropout2d(input=out, p=self.dropout_rate,
                                  training=self.training, inplace=True)
            out = out + shortcut

        return out
Example #10
Source File: unet.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass over pre-built encoder outputs.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        return self.final(dec1)
Example #11
Source File: unet.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass over pre-built encoder outputs.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        return self.final(dec1)
Example #12
Source File: models_with_depth.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x, d=None):
        """U-Net forward with a depth-channel excitation head.

        Args:
            x: input image batch (N, C, H, W).
            d: optional depth information forwarded to the excitation module.

        Returns:
            Output of ``self.final`` on the depth-excited decoder features.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        depth_channel_excitation = self.depth_channel_excitation(dec1, d)
        return self.final(depth_channel_excitation)
Example #13
Source File: ResAttUnit.py    From ACMN-Pytorch with MIT License 6 votes vote down vote up
def forward(self, input, att, word):
        """One attention-reasoning step: combine image features (*input*),
        the previous attention map (*att*) and the question embedding (*word*)
        into a new, softmax-normalized 14x14 attention map.

        Shape assumptions (implied by the reshapes below, TODO confirm against
        callers): input is (N, C, 14, 14); att holds num_outputs*14*14 values
        per sample; word is a (N, word_dim) embedding.
        """
        ## FC q: project the (dropped-out) question into a per-channel gate.
        word_W = F.dropout(word, self.dropout, training = self.training)
        weight = F.tanh(self.fcq_w(word_W)).view(-1,self.num_features,1,1)
        ## FC v: channel dropout on visual features, then suppress regions
        ## already attended (1 - att acts as a "not-yet-visited" mask).
        v = F.dropout2d(input, self.dropout, training = self.training)
        v = v * F.relu(1-att).unsqueeze(1).expand_as(input)
        v = F.tanh(self.conv1(v))
        ## attMap: attention-shift gate from previous attention + question.
        inputAttShift = F.tanh(self.fcShift1(torch.cat((att.view(-1,self.num_outputs*14*14),word),1)))
        inputAttShift = F.tanh(self.fcShift2(inputAttShift)).view(-1,self.num_features,1,1)
        ## v * q_tile: multiplicative (MLB-style) fusion of vision, question
        ## gate, and shift gate.
        v = v * weight.expand_as(v) * inputAttShift.expand_as(v) # v = self.cbn1(F.tanh(v),word) #apply non-linear before cbn equal to MLB
        # NOTE: intentionally no tanh here (per the original author's comment).
        v = self.conv2(v)
        # Normalize each output map to sum to 1 over all 14*14 positions.
        return F.softmax(v.view(-1,14*14), dim=1).view(-1,self.num_outputs,14,14) 
Example #14
Source File: adf.py    From SalsaNext with MIT License 6 votes vote down vote up
def forward(self, inputs_mean, inputs_variance):
        """ADF dropout: propagate a (mean, variance) pair through one shared
        channel-dropout mask.

        The mean is multiplied by the mask; the variance by the squared mask
        (variance of a scaled variable scales quadratically).  At eval time the
        pair passes through unchanged (variance still goes through the
        optional clamp function).
        """
        if not self.training:
            out_variance = inputs_variance
            if self._keep_variance_fn is not None:
                out_variance = self._keep_variance_fn(out_variance)
            return inputs_mean, out_variance

        # Sample the mask by dropping a ones tensor (kept entries are scaled
        # by 1/(1-p), as usual for inverted dropout).
        mask = F.dropout2d(torch.ones_like(inputs_mean), self.p, self.training, self.inplace)

        out_mean = inputs_mean * mask
        out_variance = inputs_variance * mask * mask

        if self._keep_variance_fn is not None:
            out_variance = self._keep_variance_fn(out_variance)
        return out_mean, out_variance
Example #15
Source File: unet_models.py    From open-solution-data-science-bowl-2018 with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass with max-pooling between encoder stages.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Output of ``self.final`` applied to the (dropped-out) last decoder map.
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))

        center = self.center(self.pool(conv5))

        # Decoder: each stage consumes the previous stage concatenated with the
        # matching encoder feature map (skip connection).
        dec5 = self.dec5(torch.cat([center, conv5], 1))

        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))

        # Fix: F.dropout2d defaults to training=True; gate it on self.training
        # so inference is deterministic.  (If MC-dropout was intended, revert.)
        return self.final(F.dropout2d(dec1, p=self.dropout_2d, training=self.training))
Example #16
Source File: adf.py    From uncertainty_estimation_deep_learning with MIT License 6 votes vote down vote up
def forward(self, inputs_mean, inputs_variance):
        """ADF dropout over a (mean, variance) pair sharing one dropout mask.

        Mean scales linearly with the mask and variance quadratically; at eval
        time both pass through (variance still hits the optional clamp fn).
        """
        if self.training:
            # One channel-dropout mask for both moments (kept entries carry
            # the usual 1/(1-p) inverted-dropout scaling).
            mask = F.dropout2d(torch.ones_like(inputs_mean), self.p, self.training, self.inplace)
            mean_out = inputs_mean * mask
            variance_out = inputs_variance * mask * mask
        else:
            mean_out = inputs_mean
            variance_out = inputs_variance

        if self._keep_variance_fn is not None:
            variance_out = self._keep_variance_fn(variance_out)
        return mean_out, variance_out
Example #17
Source File: pspnet.py    From open-solution-ship-detection with MIT License 6 votes vote down vote up
def forward(self, x):
        """PSPNet-style forward: encoder -> pyramid pooling -> upsampling head.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        psp = self.psp(encoder5)
        up4 = self.up4(psp)
        up3 = self.up3(up4)
        up2 = self.up2(up3)
        up1 = self.up1(up2)
        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            hypercolumn = torch.cat([up1,
                                     F.interpolate(up2, scale_factor=2, mode='bilinear'),
                                     F.interpolate(up3, scale_factor=4, mode='bilinear'),
                                     F.interpolate(up4, scale_factor=8, mode='bilinear'),
                                     ], 1)
            drop = F.dropout2d(hypercolumn, p=self.dropout_2d, training=self.training)
        else:
            drop = F.dropout2d(up1, p=self.dropout_2d, training=self.training)

        if self.pool0:
            drop = self.up0(drop)
        return self.final(drop)
Example #18
Source File: unet.py    From open-solution-ship-detection with MIT License 6 votes vote down vote up
def forward(self, x):
        """U-Net forward pass with optional hypercolumn head and stage-0 decoder.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        center = self.center(encoder5)

        # Decoder with skip connections.
        dec5 = self.dec5(center, encoder5)
        dec4 = self.dec4(dec5, encoder4)
        dec3 = self.dec3(dec4, encoder3)
        dec2 = self.dec2(dec3, encoder2)
        dec1 = self.dec1(dec2)

        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            dec1 = torch.cat([dec1,
                              F.interpolate(dec2, scale_factor=2, mode='bilinear'),
                              F.interpolate(dec3, scale_factor=4, mode='bilinear'),
                              F.interpolate(dec4, scale_factor=8, mode='bilinear'),
                              F.interpolate(dec5, scale_factor=16, mode='bilinear'),
                              ], 1)

        if self.pool0:
            dec1 = self.dec0(dec1)

        return self.final(dec1)
Example #19
Source File: large_kernel_matters.py    From open-solution-ship-detection with MIT License 6 votes vote down vote up
def forward(self, x):
        """Global Convolutional Network ("Large Kernel Matters") forward pass.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoder(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        # Global conv + boundary refinement for each encoder stage.
        gcn2 = self.enc_br2(self.gcn2(encoder2))
        gcn3 = self.enc_br3(self.gcn3(encoder3))
        gcn4 = self.enc_br4(self.gcn4(encoder4))
        gcn5 = self.enc_br5(self.gcn5(encoder5))

        # Top-down decoding with additive skip fusion.
        decoder5 = self.deconv5(gcn5)
        decoder4 = self.deconv4(self.dec_br4(decoder5 + gcn4))
        decoder3 = self.deconv3(self.dec_br3(decoder4 + gcn3))
        decoder2 = self.dec_br1(self.deconv2(self.dec_br2(decoder3 + gcn2)))

        if self.pool0:
            decoder2 = self.dec_br0_2(self.deconv1(self.dec_br0_1(decoder2)))

        return self.final(decoder2)
Example #20
Source File: rfcn_cls.py    From MOTDT with MIT License 6 votes vote down vote up
def forward(self, x, gts=None):
        """Build classification features from a backbone feature pyramid.

        ``gts`` is accepted for interface compatibility but unused here.
        """
        features = self.feature_extractor(x)
        merged = self.stage_0(features[-1])

        # up conv: repeatedly upsample and fuse with the projected skip map.
        skip_feats = self.feature_extractor.n_feats[1:]
        for idx in range(1, len(skip_feats)):
            upsampled = getattr(self, 'upconv_{}'.format(idx))(merged)
            projected = getattr(self, 'proj_{}'.format(idx))(features[-1 - idx])
            merged = torch.cat((upsampled, projected), 1)

        # cls features (dropout deliberately left disabled, as in the original)
        # x_cls_in = F.dropout2d(x_cls_in, training=self.training, inplace=True)
        return self.cls_conv(merged)
Example #21
Source File: models.py    From residual_adapters with Apache License 2.0 6 votes vote down vote up
def forward(self, x):
        """Conv with an optional task-specific parallel adapter and task BN.

        The dropout flag comes from the global ``config_task`` and depends on
        whether this is the first or second conv of the block.
        """
        task = config_task.task
        y = self.conv(x)
        # Position-dependent dropout switch on the adapter input.
        use_dropout = config_task.isdropout1 if self.second == 0 else config_task.isdropout2
        if use_dropout:
            x = F.dropout2d(x, p=0.5, training = self.training)
        # Parallel adapter: add a task-specific 1x1 projection of the input.
        if config_task.mode == 'parallel_adapters' and self.is_proj:
            y = y + self.parallel_conv[task](x)
        y = self.bns[task](y)

        return y

# No projection: identity shortcut 
Example #22
Source File: transformer.py    From flowseq with Apache License 2.0 6 votes vote down vote up
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
        """Data-dependent initialization pass for the decoder posterior.

        Mirrors the regular forward computation under ``no_grad`` while letting
        each sub-layer (and, when ``init_mu``/``init_var`` are set, the
        ``mu``/``logvar`` heads) initialize from the batch statistics.
        Returns the masked ``mu`` and ``logvar`` tensors.
        """
        with torch.no_grad():
            x = self.embed_scale * self.tgt_embed(tgt_sents)
            # Word dropout: on a (batch, len, dim) tensor, dropout2d zeroes
            # whole dim-1 rows, i.e. entire token embeddings.
            # NOTE(review): torch >= 1.12 treats 3-D input to dropout2d as an
            # unbatched (C, H, W) tensor -- verify on the targeted version.
            x = F.dropout2d(x, p=self.dropword, training=self.training)
            x += self.pos_enc(tgt_sents)
            x = F.dropout(x, p=0.2, training=self.training)

            # Boolean masks: True at padded positions (mask value 0).
            mask = tgt_masks.eq(0)
            key_mask = src_masks.eq(0)
            for layer in self.layers:
                x = layer.init(x, mask, src_enc, key_mask, init_scale=init_scale)

            # Zero out padded positions before the output heads.
            x = x * tgt_masks.unsqueeze(2)
            mu = self.mu.init(x, init_scale=0.05 * init_scale) if init_mu else self.mu(x)
            logvar = self.logvar.init(x, init_scale=0.05 * init_scale) if init_var else self.logvar(x)
            mu = mu * tgt_masks.unsqueeze(2)
            logvar = logvar * tgt_masks.unsqueeze(2)
            return mu, logvar 
Example #23
Source File: evidence_pooling.py    From gap with MIT License 6 votes vote down vote up
def forward(self, X, mask=None, training=False):
        """Pool a masked sequence ``X`` (batch, seq, dim) into output logits.

        Combines max/min pooling with a self-attentive pooling head and
        returns ``(outputs, self_weights)``.
        """
        # Simple Pooling layers.  Masked positions are pushed to +/-1e7 so
        # they never win the max/min.
        max_masked = self.replace_masked_values(X, mask.unsqueeze(2), -1e7)
        max_pool = torch.max(max_masked, 1)[0]
        min_masked = self.replace_masked_values(X, mask.unsqueeze(2), +1e7)
        min_pool = torch.min(min_masked, 1)[0]
        # NOTE(review): mean_pool is computed but never used below, and the
        # denominator counts (1-mask) positions -- the opposite convention
        # from masked_softmax below.  Confirm the intended mask polarity.
        mean_pool = torch.sum(X, 1) / torch.sum((1-mask).float(), 1, keepdim=True)

        # Self-attentive pooling layer
        # Run through linear projection. Shape: (batch_size, sequence length, 1)
        # Then remove the last dimension to get the proper attention shape (batch_size, sequence length).
        # X = X.permute(0, 2, 1)   # convert to [batch, channels, time]
        # X = F.dropout2d(X, 0.5, training=training)
        # X = X.permute(0, 2, 1)   # back to [batch, time, channels]
        self_attentive_logits = self._self_attentive_pooling_projection(X).squeeze(2)
        self_weights = self.masked_softmax(self_attentive_logits, 1-mask)
        self_attentive_pool = self.weighted_sum(X, self_weights)

        # NOTE(review): pooled_representations (the max/min/attentive concat)
        # is built but NOT fed to the dropout/output layers -- only
        # self_attentive_pool is.  This looks like a bug, but fixing it would
        # change the output layer's expected input width, so it is flagged
        # here rather than changed.
        pooled_representations = torch.cat([max_pool, min_pool, self_attentive_pool], 1)
        pooled_representations_dropped = self._integrator_dropout(self_attentive_pool)

        outputs = self._output_layer(pooled_representations_dropped)

        return outputs, self_weights 
Example #24
Source File: pspnet.py    From open-solution-salt-identification with MIT License 6 votes vote down vote up
def forward(self, x):
        """PSPNet-style forward: encoders -> pyramid pooling -> upsampling head.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            Segmentation logits from ``self.final``.
        """
        encoder2, encoder3, encoder4, encoder5 = self.encoders(x)
        # Fix: pass training=self.training -- F.dropout2d defaults to
        # training=True, which keeps dropout active at inference.
        encoder5 = F.dropout2d(encoder5, p=self.dropout_2d, training=self.training)

        psp = self.psp(encoder5)

        up4 = self.up4(psp)
        up3 = self.up3(up4)
        up2 = self.up2(up3)
        up1 = self.up1(up2)
        if self.use_hypercolumn:
            # F.upsample is deprecated; F.interpolate is the same operation.
            hypercolumn = torch.cat([up1,
                                     F.interpolate(up2, scale_factor=2, mode='bilinear'),
                                     F.interpolate(up3, scale_factor=4, mode='bilinear'),
                                     F.interpolate(up4, scale_factor=8, mode='bilinear'),
                                     ], 1)
            drop = F.dropout2d(hypercolumn, p=self.dropout_2d, training=self.training)
        else:
            # Fix: use the final full-resolution map `up1` here.  The original
            # passed `up4` (the coarsest map) while `up1` was computed and
            # left unused; the sibling ship-detection implementation uses up1.
            drop = F.dropout2d(up1, p=self.dropout_2d, training=self.training)
        return self.final(drop)
Example #25
Source File: ops.py    From torch2trt with MIT License 5 votes vote down vote up
def aten_dropout(inputs, attributes, scope):
    """Convert an ``aten::dropout`` node.

    For TensorRT/TVM inference graphs dropout is the identity, so the input
    tensor is passed through untouched; otherwise fall back to eager torch.
    """
    tensor = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        return [inputs[0]]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        return [inputs[0]]
    rate, training = inputs[1:3]
    # NOTE(review): aten::dropout is element-wise, but this fallback calls
    # dropout2d (channel-wise).  Confirm whether that mismatch is intended.
    result = F.dropout2d(tensor, rate, bool(training))
    return [result]
Example #26
Source File: transformer.py    From flowseq with Apache License 2.0 5 votes vote down vote up
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
        """Encode target tokens against the source and return the masked
        posterior parameters ``(mu, logvar)``.
        """
        # Scaled embeddings with word dropout (dropout2d on the (B, L, D)
        # tensor zeroes whole token vectors on older torch versions), then
        # positional encoding plus standard element dropout.
        hidden = self.embed_scale * self.tgt_embed(tgt_sents)
        hidden = F.dropout2d(hidden, p=self.dropword, training=self.training)
        hidden = hidden + self.pos_enc(tgt_sents)
        hidden = F.dropout(hidden, p=0.2, training=self.training)

        # Boolean padding masks: True where the mask value is 0.
        tgt_pad = tgt_masks.eq(0)
        src_pad = src_masks.eq(0)
        for layer in self.layers:
            hidden = layer(hidden, tgt_pad, src_enc, src_pad)

        # Zero out padded positions in both heads.
        pad_scale = tgt_masks.unsqueeze(2)
        return self.mu(hidden) * pad_scale, self.logvar(hidden) * pad_scale
Example #27
Source File: micro_child.py    From NAS-Benchmark with GNU General Public License v3.0 5 votes vote down vote up
def _get_model(self, inputs):
        """Assemble the ENAS child network: stem, stacked normal/reduction
        cells, optional auxiliary head, then global pooling + classifier.

        Returns ``(logits, aux_logits)``; ``aux_logits`` is None unless an
        auxiliary head fires for some layer.
        """
        aux_logits = None
        # stem conv
        # NOTE(review): if self.dataset matches neither branch, `layers` (and
        # later `x`) is unbound and the loop below raises NameError.
        if self.dataset == "CIFAR10":
            x = self.stem_conv(inputs)
            layers = [x, x]
        elif self.dataset == "Sport8" or self.dataset == "MIT67" or self.dataset == "flowers102":
            s0 = self.stem0(inputs)
            s1 = self.stem1(s0)
            layers = [s0, s1]

        out_filters = self.out_filters
        for layer_id in range(self.num_layers + 2):
            if layer_id not in self.pool_layers:
                # Normal cell.  NOTE(review): when self.fixed_arc is not None,
                # `x` is never assigned in this iteration -- the previous
                # value leaks through.  Confirm the fixed-arc path is handled
                # elsewhere.
                if self.fixed_arc is None:
                    x = self._enas_layer(layers, self.layer[layer_id], self.normal_arc, out_filters)
            else:
                # Reduction cell: double the channel count and halve spatial
                # resolution of the carried-over inputs first.
                out_filters *= 2
                if self.fixed_arc is None:
                    x = self._factorized_reduction(x, self.layer[layer_id].reduction)
                    layers = [layers[-1], x]
                    x = self._enas_layer(layers, self.layer[layer_id], self.reduce_arc, out_filters)
            # Slide the two-entry window of cell inputs forward.
            layers = [layers[-1], x]

            if self.use_aux_heads and layer_id in self.aux_head_indices:
                aux = self.aux_head(x).view(x.size(0), -1)
                aux_logits = self.aux_fc(aux)
            '''self.num_aux_vars = 0
            if self.use_aux_heads and layer_id in self.aux_head_indices and is_training:
                cur_ctx = self._get_context(cur_ctx, 'aux_head', [x.size(1), x.size(2)])
                aux_logits = cur_ctx(x)'''

        # NOTE(review): F.dropout2d is called without training=self.training,
        # so its default (training=True) keeps dropout active at eval and
        # makes inference stochastic -- likely unintended (see the TODO below).
        x = F.dropout2d(F.adaptive_avg_pool2d(F.relu(x), 1), 0.1)
        x = self.final_fc(x.view(x.size(0),-1))
        # TODO: dropout
        #if is_training and self.keep_prob is not None and self.keep_prob < 1.0:
        #    x = F.dropout(x)
        #x = self.fc(x)
        return x, aux_logits 
Example #28
Source File: models.py    From nni with MIT License 5 votes vote down vote up
def forward(self, x):
        """U-Net forward with a hypercolumn head plus an empty-mask classifier.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            (mask_logits, img_logit): per-pixel logits and a per-image logit
            predicting whether the mask is empty.
        """
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        e5 = self.encoder5(e4)

        center = self.center(e5)

        d5 = self.decoder5(center, e5, upsample=False)
        d4 = self.decoder4(d5, e4)
        d3 = self.decoder3(d4, e3)
        d2 = self.decoder2(d3, e2)
        d1 = self.decoder1(torch.cat([d2, e1], 1), x)

        # Hypercolumn: all decoder stages upsampled to full resolution.
        f = torch.cat([
            d1,
            F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False),
            F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False),
            F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False),
            F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False),
        ], 1)

        # Fix: gate dropout on self.training, matching the F.dropout call
        # below.  F.dropout2d defaults to training=True, which kept dropout
        # active at inference.
        f = F.dropout2d(f, p=self.dropout_2d, training=self.training)

        # empty mask classifier
        img_f = F.adaptive_avg_pool2d(e5, 1).view(x.size(0), -1)
        img_f = F.dropout(img_f, p=0.5, training=self.training)
        img_logit = self.logit_image(img_f).view(-1)

        return self.logit(f), img_logit
Example #29
Source File: models.py    From nni with MIT License 5 votes vote down vote up
def forward(self, x):
        """U-Net forward (variant without concatenation into decoder1) with a
        hypercolumn head plus an empty-mask classifier.

        Args:
            x: input image batch (N, C, H, W).

        Returns:
            (mask_logits, img_logit): per-pixel logits and a per-image logit
            predicting whether the mask is empty.
        """
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        e5 = self.encoder5(e4)

        center = self.center(e5)

        d5 = self.decoder5(center, e5, upsample=False)
        d4 = self.decoder4(d5, e4)
        d3 = self.decoder3(d4, e3)
        d2 = self.decoder2(d3, e2)
        d1 = self.decoder1(d2, e1)

        # Hypercolumn: all decoder stages upsampled to full resolution.
        f = torch.cat([
            d1,
            F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False),
            F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False),
            F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False),
            F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False),
        ], 1)

        # Fix: gate dropout on self.training, matching the F.dropout call
        # below.  F.dropout2d defaults to training=True, which kept dropout
        # active at inference.
        f = F.dropout2d(f, p=self.dropout_2d, training=self.training)

        # empty mask classifier
        img_f = F.adaptive_avg_pool2d(e5, 1).view(x.size(0), -1)
        img_f = F.dropout(img_f, p=0.5, training=self.training)
        img_logit = self.logit_image(img_f).view(-1)

        return self.logit(f), img_logit
Example #30
Source File: dropout.py    From nested-ner-tacl2020-transformers with GNU General Public License v3.0 5 votes vote down vote up
def forward(self, input: Tensor) -> Tensor:
        """Shared dropout along dim 1 of a (batch, seq, feat) tensor --
        presumably; verify against callers.

        Transposing to (batch, feat, seq) makes dropout2d zero whole feature
        channels across the full sequence; transposing back restores layout.
        """
        transposed = input.transpose(1, 2)
        dropped = f.dropout2d(transposed, self.p, self.training, self.inplace)
        return dropped.transpose(1, 2)