Python torch.nn.AlphaDropout() Examples

The following are 14 code examples of torch.nn.AlphaDropout(), drawn from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out the other available functions and classes of the torch.nn module.
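
AlphaDropout is the dropout variant designed for self-normalizing networks: instead of zeroing activations, it sets dropped units to SELU's negative saturation value and then applies an affine correction so the output keeps zero mean and unit variance. A minimal usage sketch (the layer sizes here are illustrative, not from any of the projects below):

import torch
import torch.nn as nn

# AlphaDropout is meant to follow SELU; ordinary Dropout would break
# the self-normalizing property that SELU maintains.
model = nn.Sequential(
    nn.Linear(32, 64),
    nn.SELU(),
    nn.AlphaDropout(p=0.1),  # drop probability; only active in train mode
    nn.Linear(64, 1),
)

x = torch.randn(8, 32)
model.train()
y_train = model(x)  # stochastic: dropout applied
model.eval()
y_eval = model(x)   # deterministic: dropout is the identity in eval mode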
Example #1
Source File: body.py    From lumin with Apache License 2.0
def _get_layer(self, idx:int, fan_in:Optional[int]=None, fan_out:Optional[int]=None) -> nn.Module:
        fan_in  = self.width if fan_in  is None else fan_in
        fan_out = self.width if fan_out is None else fan_out
        if fan_in  < 1: fan_in  = 1
        if fan_out < 1: fan_out = 1        
        
        layers = []
        for i in range(2 if self.res and idx > 0 else 1):
            layers.append(nn.Linear(fan_in, fan_out))
            self.lookup_init(self.act, fan_in, fan_out)(layers[-1].weight)
            nn.init.zeros_(layers[-1].bias)
            if self.act != 'linear': layers.append(self.lookup_act(self.act))
            if self.bn and i == 0:  layers.append(nn.BatchNorm1d(fan_out))  # In case of residual, BN will be added after addition
            if self.do: 
                if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
                else:                  layers.append(nn.Dropout(self.do))
        return nn.Sequential(*layers) 
Example #2
Source File: train_rels.py    From VCTree-Scene-Graph-Generation with MIT License
def fix_batchnorm(model):
    if isinstance(model, list):
        for m in model:
            fix_batchnorm(m)
    else:
        for m in model.modules():
            if isinstance(m, nn.BatchNorm1d):
                #print('Fix BatchNorm1d')
                m.eval()
            elif isinstance(m, nn.BatchNorm2d):
                #print('Fix BatchNorm2d')
                m.eval()
            elif isinstance(m, nn.BatchNorm3d):
                #print('Fix BatchNorm3d')
                m.eval()
            elif isinstance(m, nn.Dropout):
                #print('Fix Dropout')
                m.eval()
            elif isinstance(m, nn.AlphaDropout):
                #print('Fix AlphaDropout')
                m.eval() 
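A typical use of a helper like this is fine-tuning: the model stays in training mode so most layers keep learning, while normalization statistics and dropout masks are frozen. A short sketch against a toy model (the architecture is illustrative only):

import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 16), nn.BatchNorm1d(16),
                      nn.SELU(), nn.AlphaDropout(p=0.2))

model.train()         # put every submodule in training mode...
fix_batchnorm(model)  # ...then force BN / Dropout / AlphaDropout into eval

# BatchNorm now uses its running statistics and dropout is a no-op,
# while the Linear layers still train as usual.
print(model[1].training, model[3].training)  # False False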
Example #3
Source File: classifiers.py    From swagaf with MIT License
def __init__(self, input_dim=5, hidden_dim=1024):
        """
        Averaged embeddings of ending -> label
        :param input_dim: input feature dimension
        :param hidden_dim: hidden-layer dimension
        """
        super(LMFeatsModel, self).__init__()
        self.mapping = nn.Sequential(
            nn.Linear(input_dim, hidden_dim, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
        )
        self.prediction = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(hidden_dim, 1, bias=False),
        ) 
Example #4
Source File: classifiers.py    From swagaf with MIT License
def __init__(self, vocab):
        super(Ensemble, self).__init__()

        self.fasttext_model = BoWModel(vocab, use_mean=True, embed_dim=100)
        self.mlp_model = LMFeatsModel(input_dim=8, hidden_dim=1024)
        self.lstm_pos_model = BLSTMModel(vocab, use_postags_only=True, maxpool=True)
        # self.lstm_lex_model = BLSTMModel(vocab, use_postags_only=False, maxpool=True)
        self.cnn_model = CNNModel(vocab)

        self.mlp = nn.Sequential(
            nn.Linear(100 + 1024 + 400 + 4 * 128, 2048, bias=True),
            # nn.SELU(),
            # nn.AlphaDropout(p=0.2),
            # nn.Linear(2048, 2048, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(2048, 1, bias=False),
        ) 
Example #5
Source File: v1_neuro.py    From Attentive-Filtering-Network with MIT License
def __init__(self, input_dim):

        super(FeedForward, self).__init__()

        self.classifier = nn.Sequential(
            nn.Linear(input_dim, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),

            nn.Linear(256, 256),
            nn.BatchNorm1d(256),
            nn.AlphaDropout(p=0.5),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),

            nn.Linear(256, 1),
            nn.Sigmoid()
        ) 
Example #6
Source File: selu.py    From verb-attributes with MIT License
def __init__(self, p=0.5):
        super(AlphaDropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p 
Example #7
Source File: selu.py    From verb-attributes with MIT License
def alpha_dropout(input, p=0.5, training=False):
    r"""Applies alpha dropout to the input.
    See :class:`~torch.nn.AlphaDropout` for details.
    Args:
        p (float, optional): the drop probability
        training (bool, optional): switch between training and evaluation mode
    """
    if p < 0 or p > 1:
        raise ValueError("dropout probability has to be between 0 and 1, "
                         "but got {}".format(p))

    if p == 0 or not training:
        return input

    alpha = -1.7580993408473766
    keep_prob = 1 - p
    noise = input.data.new().resize_(input.size())
    noise.bernoulli_(p)
    noise = Variable(noise.byte())

    output = input.masked_fill(noise, alpha)

    a = (keep_prob + alpha ** 2 * keep_prob * (1 - keep_prob)) ** (-0.5)
    b = -a * alpha * (1 - keep_prob)

    return output.mul_(a).add_(b) 
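This is the pre-0.4 PyTorch implementation of the functional form (hence Variable and .data.new()); alpha is SELU's negative saturation value, and the affine pair (a, b) restores zero mean and unit variance after masking. A quick empirical check of the same constants using current tensor APIs:

import torch

p = 0.5
alpha = -1.7580993408473766   # -lambda * alpha from the SELU paper
keep_prob = 1 - p
a = (keep_prob + alpha ** 2 * keep_prob * (1 - keep_prob)) ** -0.5
b = -a * alpha * (1 - keep_prob)

x = torch.randn(1_000_000)             # unit-normal input
mask = torch.rand_like(x) < p          # drop each element with probability p
y = torch.where(mask, torch.full_like(x, alpha), x) * a + b

print(y.mean().item(), y.var().item())  # both approximately 0 and 1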
Example #8
Source File: head.py    From lumin with Apache License 2.0
def _get_layer(self, fan_in:int, fan_out:int) -> nn.Module:   
        layers = []
        layers.append(nn.Linear(fan_in, fan_out))
        self.lookup_init(self.act, fan_in, fan_out)(layers[-1].weight)
        nn.init.zeros_(layers[-1].bias)
        if self.act != 'linear': layers.append(self.lookup_act(self.act))
        if self.bn:  layers.append(nn.BatchNorm1d(fan_out))
        if self.do: 
            if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
            else:                  layers.append(nn.Dropout(self.do))
        return nn.Sequential(*layers) 
Example #9
Source File: head.py    From lumin with Apache License 2.0
def _get_layer(self, n_in:int, n_out:int, act:str, do:bool, bn:bool,
                   lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]], lookup_act:Callable[[str],Any]) -> nn.Sequential:   
        layers = []
        layers.append(nn.Linear(n_in, n_out))
        lookup_init(act, n_in, n_out)(layers[-1].weight)
        nn.init.zeros_(layers[-1].bias)
        if act != 'linear': layers.append(lookup_act(act))
        if bn:  layers.append(nn.BatchNorm1d(n_out))
        if do: 
            if act == 'selu': layers.append(nn.AlphaDropout(do))
            else:             layers.append(nn.Dropout(do))
        return nn.Sequential(*layers) 
Example #10
Source File: rebalance_dataset_mlp.py    From swagaf with MIT License
def __init__(self):
        super(MLPModel, self).__init__()
        # self.mapping = nn.Linear(train_data.feats.shape[2], 1, bias=False)

        self.mapping = nn.Sequential(
            nn.Linear(all_data.shape[-1], 2048, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(2048, 2048, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(2048, 1, bias=False),
        ) 
Example #11
Source File: drop_block.py    From Magic-VNet with MIT License
def __init__(self, drop_type):
        super(Drop, self).__init__()
        if drop_type is None:
            self.drop = keep_origin
        elif drop_type == 'alpha':
            self.drop = nn.AlphaDropout(p=0.5)
        elif drop_type == 'dropout':
            self.drop = nn.Dropout3d(p=0.5)
        elif drop_type == 'drop_block':
            self.drop = DropBlock3D(drop_prob=0.2, block_size=2)
        else:
            raise NotImplementedError('{} not implemented'.format(drop_type)) 
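The excerpt shows only the constructor (keep_origin and DropBlock3D are defined elsewhere in the repository); forward presumably just applies self.drop. The practical difference between the 'alpha' and 'dropout' branches is how they mask a 3D feature volume:

import torch
import torch.nn as nn

x = torch.randn(1, 3, 2, 2, 2)  # (N, C, D, H, W)

# nn.Dropout3d zeroes whole channels; nn.AlphaDropout masks individual
# elements and rescales so SELU statistics are preserved.
d3 = nn.Dropout3d(p=0.5).train()
ad = nn.AlphaDropout(p=0.5).train()
print(d3(x)[0].abs().sum(dim=(1, 2, 3)))  # some channel sums are exactly zero
print(ad(x))                              # dropped entries become a*alpha + b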
Example #12
Source File: object_detector.py    From neural-motifs with MIT License
def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
                 max_per_img=64, use_resnet=False, thresh=0.05):
        """
        :param classes: Object classes
        :param rel_classes: Relationship classes; None if we're not using rel mode
        :param num_gpus: how many GPUs to use
        """
        super(ObjectDetector, self).__init__()

        if mode not in self.MODES:
            raise ValueError("invalid mode")
        self.mode = mode

        self.classes = classes
        self.num_gpus = num_gpus
        self.pooling_size = 7
        self.nms_filter_duplicates = nms_filter_duplicates
        self.max_per_img = max_per_img
        self.use_resnet = use_resnet
        self.thresh = thresh

        if not self.use_resnet:
            vgg_model = load_vgg()
            self.features = vgg_model.features
            self.roi_fmap = vgg_model.classifier
            rpn_input_dim = 512
            output_dim = 4096
        else:  # Deprecated
            self.features = load_resnet()
            self.compress = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=1),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(256),
            )
            self.roi_fmap = nn.Sequential(
                nn.Linear(256 * 7 * 7, 2048),
                nn.SELU(inplace=True),
                nn.AlphaDropout(p=0.05),
                nn.Linear(2048, 2048),
                nn.SELU(inplace=True),
                nn.AlphaDropout(p=0.05),
            )
            rpn_input_dim = 1024
            output_dim = 2048

        self.score_fc = nn.Linear(output_dim, self.num_classes)
        self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
        self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim) 
Example #13
Source File: object_detector.py    From VCTree-Scene-Graph-Generation with MIT License
def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
                 max_per_img=64, use_resnet=False, thresh=0.05, use_rl_tree=False):
        """
        :param classes: Object classes
        :param rel_classes: Relationship classes; None if we're not using rel mode
        :param num_gpus: how many GPUs to use
        """
        super(ObjectDetector, self).__init__()

        if mode not in self.MODES:
            raise ValueError("invalid mode")
        self.mode = mode

        self.classes = classes
        self.num_gpus = num_gpus
        self.pooling_size = 7
        self.nms_filter_duplicates = nms_filter_duplicates
        self.max_per_img = max_per_img
        self.use_resnet = use_resnet
        self.thresh = thresh
        self.use_rl_tree = use_rl_tree

        if not self.use_resnet:
            vgg_model = load_vgg()
            self.features = vgg_model.features
            self.roi_fmap = vgg_model.classifier
            rpn_input_dim = 512
            output_dim = 4096
        else:  # Deprecated
            self.features = load_resnet()
            self.compress = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=1),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(256),
            )
            self.roi_fmap = nn.Sequential(
                nn.Linear(256 * 7 * 7, 2048),
                nn.SELU(inplace=True),
                #nn.AlphaDropout(p=0.05),
                nn.Linear(2048, 2048),
                nn.SELU(inplace=True),
                #nn.AlphaDropout(p=0.05),
            )
            rpn_input_dim = 1024
            output_dim = 2048

        self.score_fc = nn.Linear(output_dim, self.num_classes)
        self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
        self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim) 
Example #14
Source File: object_detector.py    From KERN with MIT License
def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
                 max_per_img=64, use_resnet=False, thresh=0.05):
        """
        :param classes: Object classes
        :param rel_classes: Relationship classes; None if we're not using rel mode
        :param num_gpus: how many GPUs to use
        """
        super(ObjectDetector, self).__init__()

        if mode not in self.MODES:
            raise ValueError("invalid mode")
        self.mode = mode

        self.classes = classes
        self.num_gpus = num_gpus
        self.pooling_size = 7
        self.nms_filter_duplicates = nms_filter_duplicates
        self.max_per_img = max_per_img
        self.use_resnet = use_resnet
        self.thresh = thresh

        if not self.use_resnet:
            vgg_model = load_vgg()
            self.features = vgg_model.features
            self.roi_fmap = vgg_model.classifier
            rpn_input_dim = 512
            output_dim = 4096
        else:  # Deprecated
            self.features = load_resnet()
            self.compress = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=1),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(256),
            )
            self.roi_fmap = nn.Sequential(
                nn.Linear(256 * 7 * 7, 2048),
                nn.SELU(inplace=True),
                nn.AlphaDropout(p=0.05),
                nn.Linear(2048, 2048),
                nn.SELU(inplace=True),
                nn.AlphaDropout(p=0.05),
            )
            rpn_input_dim = 1024
            output_dim = 2048

        self.score_fc = nn.Linear(output_dim, self.num_classes)
        self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
        self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim)