Python torch.clamp_() Examples

The following are 7 code examples of torch.clamp_(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
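As a quick orientation before the examples: torch.clamp_() is the in-place counterpart of torch.clamp(). It limits every element of a tensor to the range [min, max] and modifies the tensor directly instead of returning a new one. A minimal sketch, with values chosen purely for illustration:

import torch

x = torch.tensor([-2.0, -0.5, 0.3, 1.7])
x.clamp_(min=0.0, max=1.0)   # in-place: x is now [0.0, 0.0, 0.3, 1.0]
torch.clamp_(x, -1.0, 1.0)   # the same in-place operation, called from the torch namespace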
Example #1
Source File: glow_msc.py    From pde-surrogate with MIT License    5 votes
def __init__(self, mean, log_stddev):
    super().__init__()
    self.mean = mean
    self.log_stddev = log_stddev.clamp_(min=-10., max=math.log(5.))
    # self._backward_hook = self.log_stddev.register_hook(
    #     lambda grad: torch.clamp_(grad, -10., 10.))
Example #2
Source File: glow_msc.py    From pde-surrogate with MIT License    5 votes
def sample(self, eps=None):
    self.log_stddev.data.clamp_(min=-10., max=math.log(5.))
    if eps is None:
        eps = torch.randn_like(self.log_stddev)
    return self.mean + self.log_stddev.exp() * eps
Example #3
Source File: glow_msc.py    From pde-surrogate with MIT License    5 votes
def forward(self, x):
    conditions = []
    for i in range(self.num_blocks):
        # denseblock
        x = self[i*2](x)
        conditions.append(x)
        # downsampling, the last one is top_latent
        x = self[i*2+1](x)
        if i == self.num_blocks - 1:
            mean, log_stddev = x.chunk(2, 1)
            log_stddev = log_stddev.data.clamp_(min=-10., max=math.log(5.))
    return conditions, GaussianDiag(mean, log_stddev)
Example #4
Source File: array.py    From MONAI with Apache License 2.0    5 votes
def __call__(self, img):
    """
    Args:
        img: torch tensor data to extract the contour, with shape: [batch_size, channels, height, width[, depth]]

    Returns:
        A torch tensor with the same shape as img, note:
            1. it's the binary classification result of whether a pixel is an edge or not.
            2. in order to keep the original shape of the mask image, we use padding by default.
            3. the edge detection is only approximate because of defects inherent to the Laplace kernel;
               ideally the edge should be thin, but here it has some thickness.

    """
    channels = img.shape[1]
    if img.ndim == 4:
        kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)
        kernel = kernel.repeat(channels, 1, 1, 1)
        contour_img = F.conv2d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
    elif img.ndim == 5:
        kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)
        kernel[1, 1, 1] = 26
        kernel = kernel.repeat(channels, 1, 1, 1, 1)
        contour_img = F.conv3d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
    else:
        raise RuntimeError("the dimensions of img should be 4 or 5.")

    torch.clamp_(contour_img, min=0.0, max=1.0)
    return contour_img
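To make the docstring above concrete, here is a small self-contained sketch of the 2D case; the toy mask and its size are invented for illustration, only the kernel and the clamp mirror the example:

import torch
import torch.nn.functional as F

# hypothetical binary mask: a 4x4 square of ones inside a 6x6 image, shape [N, C, H, W]
mask = torch.zeros(1, 1, 6, 6)
mask[:, :, 1:5, 1:5] = 1.0

kernel = torch.tensor([[-1., -1., -1.], [-1., 8., -1.], [-1., -1., -1.]]).view(1, 1, 3, 3)
contour = F.conv2d(mask, kernel, padding=1)
torch.clamp_(contour, 0.0, 1.0)
# interior pixels cancel out (8 - 8 neighbours = 0), so after clamping only the
# one-pixel-thick border of the square remains at 1.0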
Example #5
Source File: common.py    From Pytorch_Lightweight_Network with MIT License    5 votes
def inverse_sigmoid_(x, eps=1e-6):
    if eps != 0:
        x = torch.clamp_(x, eps, 1 - eps)
    return x.div_(1 - x).log_() 
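For reference, a brief usage sketch of inverse_sigmoid_ above (input values chosen only for illustration): clamping to [eps, 1 - eps] before dividing and taking the log keeps the endpoints 0 and 1 finite. The trailing underscore is meaningful here, since the helper mutates its argument in place, hence the clone() below.

import torch

p = torch.tensor([0.0, 0.25, 0.5, 1.0])
logits = inverse_sigmoid_(p.clone())  # clone() because the helper modifies its input in place
# roughly [-13.8155, -1.0986, 0.0, 13.8155]; without the clamp the endpoints would give -inf / inf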
Example #6
Source File: torch_message.py    From deep_gcns_torch with MIT License    5 votes
def aggregate(self, inputs, index, ptr=None, dim_size=None):
    if self.aggr in ['add', 'mean', 'max', None]:
        return super(GenMessagePassing, self).aggregate(inputs, index, ptr, dim_size)

    elif self.aggr == 'softmax':
        out = scatter_softmax(inputs*self.t, index, dim=self.node_dim)
        out = scatter(inputs*out, index, dim=self.node_dim,
                      dim_size=dim_size, reduce='sum')
        return out

    elif self.aggr == 'softmax_sg':
        with torch.no_grad():
            out = scatter_softmax(inputs*self.t, index, dim=self.node_dim)
        out = scatter(inputs*out, index, dim=self.node_dim,
                      dim_size=dim_size, reduce='sum')
        return out

    elif self.aggr == 'power':
        min_value, max_value = 1e-7, 1e1
        torch.clamp_(inputs, min_value, max_value)
        out = scatter(torch.pow(inputs, self.p), index, dim=self.node_dim,
                      dim_size=dim_size, reduce='mean')
        torch.clamp_(out, min_value, max_value)
        return torch.pow(out, 1/self.p)

    else:
        raise NotImplementedError('To be implemented')
Example #7
Source File: neural_agent.py    From phyre with Apache License 2.0    5 votes
def refine_actions(model, actions, single_observarion, learning_rate,
                   num_updates, batch_size, refine_loss):
    observations = torch.tensor(single_observarion,
                                device=model.device).unsqueeze(0)
    actions = torch.tensor(actions)

    refined_actions = []
    model.eval()
    preprocessed = model.preprocess(observations)
    preprocessed = {k: v.detach() for k, v in preprocessed.items()}
    for start in range(0, len(actions), batch_size):
        action_batch = actions[start:][:batch_size].to(model.device)
        action_batch = torch.nn.Parameter(action_batch)
        optimizer = torch.optim.Adam([action_batch], lr=learning_rate)
        losses = []
        for _ in range(num_updates):
            optimizer.zero_grad()
            logits = model(None, action_batch, preprocessed=preprocessed)
            if refine_loss == 'ce':
                loss = model.ce_loss(logits, actions.new_ones(len(logits)))
            elif refine_loss == 'linear':
                loss = -logits.sum()
            else:
                raise ValueError(f'Unknown loss: {refine_loss}')
            loss.backward()
            losses.append(loss.item())
            optimizer.step()
        action_batch = torch.clamp_(action_batch.data, 0, 1)
        refined_actions.append(action_batch.cpu().numpy())
    refined_actions = np.concatenate(refined_actions, 0).tolist()
    return refined_actions