Python torch.nn.Threshold() Examples

The following are 11 code examples of torch.nn.Threshold(). Each example comes from an open-source project; the source file, project, and license are noted above the code. You may also want to check out all available functions and classes of the torch.nn module.
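nn.Threshold(threshold, value) applies y = x if x > threshold, else y = value, element-wise. A minimal sketch of this behaviour:

import torch
import torch.nn as nn

# Elements strictly greater than the threshold pass through unchanged;
# everything else is replaced by `value`.
m = nn.Threshold(0.1, 20.0)
x = torch.tensor([-1.0, 0.05, 0.1, 0.5])
print(m(x))  # tensor([20.0000, 20.0000, 20.0000,  0.5000])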
Example #1
Source File: model_architecture.py    From models with MIT License
def get_model(load_weights = True):
    deepsea_cpu = nn.Sequential( # Sequential,
        nn.Conv2d(4,320,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320,480,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480,960,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(50880,925)), # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(925,919)), # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), deepsea_cpu) 
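The repeated nn.Threshold(0, 1e-06) layers act as a ReLU variant: positive activations pass through, while non-positive ones become 1e-06 instead of exactly 0 (presumably a carry-over from the original Torch7 DeepSEA model). A quick comparison:

import torch
import torch.nn as nn

x = torch.tensor([-2.0, 0.0, 3.0])
print(nn.Threshold(0, 1e-06)(x))  # tensor([1.0000e-06, 1.0000e-06, 3.0000e+00])
print(nn.ReLU()(x))               # tensor([0., 0., 3.])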
Example #2
Source File: model_architecture.py    From models with MIT License
def get_seqpred_model(load_weights = True):
    deepsea_cpu = nn.Sequential( # Sequential,
        nn.Conv2d(4,320,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320,480,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480,960,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(50880,925)), # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(925,919)), # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), ConcatenateRC(), deepsea_cpu, AverageRC()) 
Example #3
Source File: base.py    From pytorch-NMF with MIT License
def __init__(self):
        super().__init__()
        self.fix_neg = nn.Threshold(0., 1e-8) 
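Here Threshold(0., 1e-8) is a numerical guard for non-negative matrix factorisation: any entry at or below zero is replaced by 1e-8, keeping the factor matrices strictly positive so later divisions and logarithms stay finite. A short sketch:

import torch
import torch.nn as nn

fix_neg = nn.Threshold(0., 1e-8)
W = torch.tensor([[0.5, -0.3],
                  [0.0,  2.0]])
print(fix_neg(W))
# tensor([[5.0000e-01, 1.0000e-08],
#         [1.0000e-08, 2.0000e+00]])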
Example #4
Source File: model.py    From CAE-ADMM with MIT License
def __init__(self, num_resblocks,final_len):
        super(CAEP, self).__init__()
        self.num_resblocks = num_resblocks
        self.threshold = torch.Tensor([1e-4])
        self.prune = False

        # Encoder
        self.E_Conv_1 = conv_same(3, 32)  # 3,128,128 => 32,128,128
        self.E_PReLU_1 = nn.PReLU()
        self.E_Conv_2 = conv_downsample(32, 64)  # 32,128,128 => 64,64,64
        self.E_PReLU_2 = nn.PReLU()
        self.E_Conv_3 = conv_same(64, 128)  # 64,64,64 => 128,64,64
        self.E_PReLU_3 = nn.PReLU()
        self.E_Res = res_layers(128, num_blocks=self.num_resblocks)
        self.E_Conv_4 = conv_downsample(128, 64)  # 128,64,64 => 64,32,32
        self.E_Conv_5 = conv_downsample(64, 32)
        self.E_Conv_6 = conv_same(32, final_len)

        # nn.Threshold expects a scalar threshold, so unwrap the 1-element tensor.
        self.Pruner = nn.Threshold(self.threshold.item(), 0, inplace=True)

        # max_bpp = 32*16*16/128/128 * bits per int = 0.5 * bits per int

        # Decoder
        self.D_SubPix_00 = sub_pix(final_len, 32, 1)
        self.D_SubPix_0 = sub_pix(32, 64, 2)  # for fine tuning
        self.D_SubPix_1 = sub_pix(64, 128, 2)  # 64,32,32 => 128,64,64
        self.D_PReLU_1 = nn.PReLU()
        self.D_Res = res_layers(128, num_blocks=self.num_resblocks)
        self.D_SubPix_2 = sub_pix(128, 64, 1)  # 128,64,64 => 64,64,64
        self.D_PReLU_2 = nn.PReLU()
        self.D_SubPix_3 = sub_pix(64, 32, 2)  # 64,64,64 => 32,128,128
        self.D_PReLU_3 = nn.PReLU()
        self.D_SubPix_4 = sub_pix(32, 3, 1)  # 32,128,128 => 3,128,128
        self.tanh = nn.Tanh()

        self.__init_parameters__() 
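The Pruner zeroes latent-code entries at or below 1e-4, turning near-zero activations into exact zeros that compress well. Note that nn.Threshold also zeroes every negative entry, so this pattern assumes a non-negative code. A hedged sketch of the effect (tensor values are illustrative):

import torch
import torch.nn as nn

pruner = nn.Threshold(1e-4, 0.0)
code = torch.tensor([0.5, 5e-5, -0.2, 1e-4])
print(pruner(code))  # tensor([0.5000, 0.0000, 0.0000, 0.0000])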
Example #5
Source File: simplenet_cifar.py    From dnn-quant-ocs with Apache License 2.0
def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # x = nn.Threshold(0.2, 0.0)(x)  # optional activation zero-threshold (disabled)
        x = self.fc3(x)
        return x 
Example #6
Source File: model.py    From neutralizing-bias with MIT License
def __init__(self, debias_model, tagging_model):
        super(JointModel, self).__init__()
    
        # TODO SHARING EMBEDDINGS FROM DEBIAS
        self.debias_model = debias_model
        self.tagging_model = tagging_model

        self.token_sm = nn.Softmax(dim=2)
        self.time_sm = nn.Softmax(dim=1)
        self.tok_threshold = nn.Threshold(
            ARGS.zero_threshold,
            -10000.0 if ARGS.sequence_softmax else 0.0) 
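When ARGS.sequence_softmax is set, token scores at or below ARGS.zero_threshold are mapped to -10000.0, so the subsequent softmax over time assigns them essentially zero probability; otherwise they are simply zeroed. A hedged sketch of the masking trick (the 0.5 threshold is an illustrative stand-in for ARGS.zero_threshold):

import torch
import torch.nn as nn

tok_threshold = nn.Threshold(0.5, -10000.0)
scores = torch.tensor([0.9, 0.4, 0.7])
masked = tok_threshold(scores)      # tensor([ 9.0000e-01, -1.0000e+04,  7.0000e-01])
print(nn.Softmax(dim=0)(masked))    # the masked entry receives ~0 probability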
Example #7
Source File: DeepMask.py    From deepmask-pytorch with MIT License
def createScoreBranch(self):
        scoreBranch = nn.Sequential(
            nn.Dropout(0.5),
            nn.Conv2d(512, 1024, 1),
            nn.Threshold(0, 1e-6),  # ReLU-like; non-positive values become 1e-6 rather than exactly 0
            nn.Dropout(0.5),
            nn.Conv2d(1024, 1, 1),
        )
        return scoreBranch 
Example #8
Source File: activation.py    From claf with MIT License
def get_activation_fn(name):
    """ PyTorch built-in activation functions """

    activation_functions = {
        "linear": lambda: lambda x: x,
        "relu": nn.ReLU,
        "relu6": nn.ReLU6,
        "elu": nn.ELU,
        "prelu": nn.PReLU,
        "leaky_relu": nn.LeakyReLU,
        "threshold": nn.Threshold,
        "hardtanh": nn.Hardtanh,
        "sigmoid": nn.Sigmoid,
        "tanh": nn.Tanh,
        "log_sigmoid": nn.LogSigmoid,
        "softplus": nn.Softplus,
        "softshrink": nn.Softshrink,
        "softsign": nn.Softsign,
        "tanhshrink": nn.Tanhshrink,
    }

    if name not in activation_functions:
        raise ValueError(
            f"'{name}' is not a supported activation function. Use one of:\n{list(activation_functions.keys())}"
        )

    return activation_functions[name] 
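Unlike most entries in this table, nn.Threshold takes required constructor arguments, so the class returned by the factory must be called with a threshold and a replacement value. Usage of the factory defined above:

import torch

# Assumes get_activation_fn from the example above is in scope.
relu = get_activation_fn("relu")()                  # no required arguments
thresh = get_activation_fn("threshold")(0.1, 0.0)   # threshold and value are required
print(thresh(torch.tensor([-1.0, 0.05, 0.3])))      # tensor([0.0000, 0.0000, 0.3000])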
Example #9
Source File: threshold.py    From onnx2keras with MIT License
def __init__(self):
        super(LayerThresholdTest, self).__init__()
        self.threshold = random.random()
        self.value = self.threshold + random.random()
        self.thresh = nn.Threshold(self.threshold, self.value) 
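Since value is drawn strictly above threshold, replaced elements are easy to tell apart from pass-through elements, which makes the converted layer straightforward to verify. A quick numerical check of the forward pass (the 0.4/0.9 pair stands in for the random values):

import torch
import torch.nn as nn

thresh = nn.Threshold(0.4, 0.9)  # illustrative threshold < value, as in the test
x = torch.rand(10) * 2 - 1       # inputs in [-1, 1)
y = thresh(x)
mask = x > 0.4
assert torch.equal(y[mask], x[mask])  # elements above the threshold pass through
assert torch.all(y[~mask] == 0.9)     # the rest are replaced by `value`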
Example #10
Source File: nmf_cpu.py    From SigProfilerExtractor with BSD 2-Clause "Simplified" License
def __init__(self, V, rank, max_iterations=200000, tolerance=1e-8, test_conv=1000, gpu_id=0, seed=None,
                 init_method='nndsvd', floating_point_precision='float', min_iterations=2000):

        """
        Run non-negative matrix factorisation using GPU. Uses beta-divergence.

        Args:
          V: Matrix to be factorised
          rank: (int) number of latent dimensnions to use in factorisation
          max_iterations: (int) Maximum number of update iterations to use during fitting
          tolerance: tolerance to use in convergence tests. Lower numbers give longer times to convergence
          test_conv: (int) How often to test for convergnce
          gpu_id: (int) Which GPU device to use
          seed: random seed, if None (default) datetime is used
          init_method: how to initialise basis and coefficient matrices, options are:
            - random (will always be the same if seed != None)
            - NNDSVD
            - NNDSVDa (fill in the zero elements with the average),
            - NNDSVDar (fill in the zero elements with random values in the space [0:average/100]).
          floating_point_precision: (string or type). Can be `double`, `float` or any type/string which
              torch can interpret.
          min_iterations: the minimum number of iterations to execute before termination. Useful when using
              fp32 tensors as convergence can happen too early.
        """
        #torch.cuda.set_device(gpu_id)

        if seed is None:
            seed = datetime.now().timestamp()

        if floating_point_precision == 'float':
            self._tensor_type = torch.FloatTensor
        elif floating_point_precision == 'double':
            self._tensor_type = torch.DoubleTensor
        else:
            self._tensor_type = floating_point_precision

        torch.manual_seed(seed)
        #torch.cuda.manual_seed(seed)

        self.max_iterations = max_iterations
        self.min_iterations = min_iterations

        # If V is not in a batch, put it in a batch of 1
        if len(V.shape) == 2:
            V = V[None, :, :]

        self._V = V.type(self._tensor_type)
        self._fix_neg = nn.Threshold(0., 1e-8)
        self._tolerance = tolerance
        self._prev_loss = None
        self._iter = 0
        self._test_conv = test_conv
        #self._gpu_id = gpu_id
        self._rank = rank
        self._W, self._H = self._initialise_wh(init_method) 
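In the fitting loop (not shown), _fix_neg is what keeps the factors non-negative after each multiplicative update. A hedged sketch of how it typically slots in; update_step and the Euclidean update rule below are illustrative assumptions, as the project itself uses beta-divergence:

import torch
import torch.nn as nn

fix_neg = nn.Threshold(0., 1e-8)

def update_step(V, W, H):
    # Illustrative Lee-Seung multiplicative updates (Euclidean NMF);
    # works on batched tensors of shape (batch, m, n).
    Wt = W.transpose(-2, -1)
    H = H * (Wt @ V) / (Wt @ W @ H)
    Ht = H.transpose(-2, -1)
    W = W * (V @ Ht) / (W @ H @ Ht)
    # Clamp anything at or below zero back to a small positive value.
    return fix_neg(W), fix_neg(H)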
Example #11
Source File: nmf_gpu.py    From SigProfilerExtractor with BSD 2-Clause "Simplified" License
def __init__(self, V, rank, max_iterations=200000, tolerance=1e-8, test_conv=1000, gpu_id=0, seed=None,
                 init_method='nndsvd', floating_point_precision='float', min_iterations=2000):

        """
        Run non-negative matrix factorisation using GPU. Uses beta-divergence.

        Args:
          V: Matrix to be factorised
          rank: (int) number of latent dimensnions to use in factorisation
          max_iterations: (int) Maximum number of update iterations to use during fitting
          tolerance: tolerance to use in convergence tests. Lower numbers give longer times to convergence
          test_conv: (int) How often to test for convergnce
          gpu_id: (int) Which GPU device to use
          seed: random seed, if None (default) datetime is used
          init_method: how to initialise basis and coefficient matrices, options are:
            - random (will always be the same if seed != None)
            - NNDSVD
            - NNDSVDa (fill in the zero elements with the average),
            - NNDSVDar (fill in the zero elements with random values in the space [0:average/100]).
          floating_point_precision: (string or type). Can be `double`, `float` or any type/string which
              torch can interpret.
          min_iterations: the minimum number of iterations to execute before termination. Useful when using
              fp32 tensors as convergence can happen too early.
        """
        torch.cuda.set_device(gpu_id)

        if seed is None:
            seed = datetime.now().timestamp()

        if floating_point_precision == 'float':
            self._tensor_type = torch.FloatTensor
        elif floating_point_precision == 'double':
            self._tensor_type = torch.DoubleTensor
        else:
            self._tensor_type = floating_point_precision

        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        self.max_iterations = max_iterations
        self.min_iterations = min_iterations

        # If V is not in a batch, put it in a batch of 1
        if len(V.shape) == 2:
            V = V[None, :, :]

        self._V = V.type(self._tensor_type).cuda()
        self._fix_neg = nn.Threshold(0., 1e-8)
        self._tolerance = tolerance
        self._prev_loss = None
        self._iter = 0
        self._test_conv = test_conv
        self._gpu_id = gpu_id
        self._rank = rank
        self._W, self._H = self._initialise_wh(init_method) 