Python torch.gels() Examples

The following are 8 code examples of torch.gels(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function. Note that torch.gels was deprecated in PyTorch 1.2 in favor of torch.lstsq and later superseded by torch.linalg.lstsq, so these examples target older PyTorch releases.
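Before the examples, a minimal sketch of the calling convention, assuming a PyTorch release that still ships torch.gels: torch.gels(B, A) solves A @ X = B in the least-squares sense and returns a (solution, QR) pair, with the actual solution in the first A.size(1) rows of the first tensor. The variable names below are made up for illustration.

import torch

A = torch.randn(10, 3)            # design matrix: 10 equations, 3 unknowns
x_true = torch.randn(3, 1)
B = torch.matmul(A, x_true)       # right-hand side

X, qr = torch.gels(B, A)          # note the argument order: B first, then A
x = X[:A.size(1)]                 # only the first 3 rows hold the solution
print(torch.allclose(x, x_true, atol=1e-4))   # True (up to tolerance)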
Example #1
Source File: baseline.py    From cavia with MIT License
def fit(self, episodes):
        # sequence_length * batch_size x feature_size
        featmat = self._feature(episodes).view(-1, self.feature_size)
        # sequence_length * batch_size x 1
        returns = episodes.returns.view(-1, 1)

        reg_coeff = self._reg_coeff
        eye = torch.eye(self.feature_size, dtype=torch.float32,
                        device=self.linear.weight.device)
        for _ in range(5):
            try:
                # torch.gels(B, A) solves A @ X = B; here these are the
                # ridge-regularized normal equations (X^T X + reg*I) w = X^T y
                coeffs, _ = torch.gels(
                    torch.matmul(featmat.t(), returns),
                    torch.matmul(featmat.t(), featmat) + reg_coeff * eye
                )
                break
            except RuntimeError:
                reg_coeff += 10
        else:
            raise RuntimeError('Unable to solve the normal equations in '
                               '`LinearFeatureBaseline`. The matrix X^T*X (with X the design '
                               'matrix) is not full-rank, regardless of the regularization '
                               '(maximum regularization: {0}).'.format(reg_coeff))
        self.linear.weight.data = coeffs.data.t() 
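For comparison, a hedged sketch of the same ridge solve on PyTorch >= 1.9, where torch.linalg.lstsq replaces torch.gels; the argument order is reversed and the result is no longer padded. This is not part of the cavia source:

A = torch.matmul(featmat.t(), featmat) + reg_coeff * eye   # (F, F)
b = torch.matmul(featmat.t(), returns)                     # (F, 1)
coeffs = torch.linalg.lstsq(A, b).solution                 # (F, 1), no padding rows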
Example #2
Source File: channel.py    From nn-compression with MIT License
def weight_reconstruction(next_module, next_input_feature, next_output_feature, cpu=True):
    """
    reconstruct the weight of the next layer to the one being pruned
    :param next_module: torch.nn.module, module of the next layer to the one being pruned
    :param next_input_feature: torch.(cuda.)Tensor, new input feature map of the next layer
    :param next_output_feature: torch.(cuda.)Tensor, original output feature map of the next layer
    :param cpu: bool, whether done in cpu
    :return:
        void
    """
    if next_module.bias is not None:
        bias_size = [1] * next_output_feature.dim()
        bias_size[1] = -1
        next_output_feature -= next_module.bias.view(bias_size)
    if cpu:
        next_input_feature = next_input_feature.cpu()
    if isinstance(next_module, torch.nn.modules.conv._ConvNd):
        unfold = torch.nn.Unfold(kernel_size=next_module.kernel_size,
                                 dilation=next_module.dilation,
                                 padding=next_module.padding,
                                 stride=next_module.stride)
        if not cpu:
            unfold = unfold.cuda()
        unfold.eval()
        next_input_feature = unfold(next_input_feature)
        next_input_feature = next_input_feature.transpose(1, 2)
        num_fields = next_input_feature.size(0) * next_input_feature.size(1)
        next_input_feature = next_input_feature.reshape(num_fields, -1)
        next_output_feature = next_output_feature.view(next_output_feature.size(0), next_output_feature.size(1), -1)
        next_output_feature = next_output_feature.transpose(1, 2).reshape(num_fields, -1)
    if cpu:
        next_output_feature = next_output_feature.cpu()
    # torch.gels(B, A) solves A @ X = B: here X is the transposed weight that
    # maps the new input features to the original output features
    param, _ = torch.gels(next_output_feature.data, next_input_feature.data)
    # keep only the first size(1) rows (the actual solution), then reshape to weight layout
    param = param[0:next_input_feature.size(1), :].clone().t().contiguous().view(next_output_feature.size(1), -1)
    if isinstance(next_module, torch.nn.modules.conv._ConvNd):
        param = param.view(next_module.out_channels, next_module.in_channels, *next_module.kernel_size)
    del next_module.weight
    next_module.weight = torch.nn.Parameter(param) 
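A small, self-contained sketch of the identity this function relies on: for a bias-free linear layer, torch.gels recovers the (transposed) weight from input/output pairs. The layer sizes here are arbitrary illustrations, assuming a PyTorch release that still ships torch.gels:

import torch

layer = torch.nn.Linear(8, 4, bias=False)
x = torch.randn(100, 8)                    # stand-in input feature map
y = layer(x).detach()                      # original output feature map

sol, _ = torch.gels(y, x)                  # solves x @ W^T = y
w = sol[:x.size(1), :].t()                 # first 8 rows hold the solution
print(torch.allclose(w, layer.weight, atol=1e-4))   # True (up to tolerance)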
Example #3
Source File: lighting.py    From hmd with MIT License
def RGBalbedoSHToLight(colorImg, albedoImg, SH, confidence_map):
    
    # keep only confident pixels: zero the confidence map where color or albedo is zero
    confidence_map[colorImg==0] = 0
    confidence_map[albedoImg==0] = 0
    
    id_non_not = confidence_map.nonzero()
    idx_non = torch.unbind(id_non_not, 1)  # this only works for two dimensions
    
    colorImg_non = colorImg[idx_non]
    albedoImg_non = albedoImg[idx_non]
    
    # get the shading image by element-wise division: shading = color / albedo
    shadingImg_non = torch.div(colorImg_non, albedoImg_non)    
    shadingImg_non2 = shadingImg_non.view(-1,1)

    # gather the 9 spherical-harmonics basis channels at the valid pixels
    SH_non = [SH[i, :, :][idx_non] for i in range(9)]
    SH_NON = torch.stack(SH_non, dim=-1)
    
    ## torch.gels(B, A) solves A @ X = B in the least-squares sense; when the
    ## system is overdetermined (M > N), only the first N rows of the result
    ## hold the solution. See https://pytorch.org/docs/stable/torch.html#torch.gels
    light, _ = torch.gels(shadingImg_non2, SH_NON)  
    light_9 = light[0:9] # use first 9

    return (light_9, SH) 
Example #4
Source File: robotics.py    From cherry with Apache License 2.0
def fit(self, states, returns):
        features = self._features(states)
        reg = self.reg * th.eye(features.size(1))
        reg = reg.to(states.device)
        A = features.t() @ features + reg
        b = features.t() @ returns
        if hasattr(th, 'lstsq'):  # torch.lstsq replaced torch.gels in torch >= 1.2
            coeffs, _ = th.lstsq(b, A)
        else:
            coeffs, _ = th.gels(b, A)
        self.linear.weight.data = coeffs.data.t() 
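torch.lstsq was itself deprecated later (PyTorch 1.9) in favor of torch.linalg.lstsq. A hedged sketch of how this branch might be extended for current releases; this is not part of the cherry source:

if hasattr(th, 'linalg') and hasattr(th.linalg, 'lstsq'):   # torch >= 1.9
    coeffs = th.linalg.lstsq(A, b).solution
elif hasattr(th, 'lstsq'):                                  # 1.2 <= torch < 1.9
    coeffs, _ = th.lstsq(b, A)
else:                                                       # torch < 1.2
    coeffs, _ = th.gels(b, A)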
Example #5
Source File: affine_utils.py    From AutoDL-Projects with MIT License
def solve2theta(source, target):
  source, target = source.clone(), target.clone()
  oks = source[2, :] == 1
  assert torch.sum(oks).item() >= 3, 'at least 3 valid points are required, got {:}'.format(torch.sum(oks).item())
  if target.size(0) == 2: target = torch.cat((target, oks.unsqueeze(0).float()), dim=0)
  source, target = source[:, oks], target[:, oks]
  source, target = source.transpose(1,0), target.transpose(1,0)
  assert source.size(1) == target.size(1) == 3
  #X, residual, rank, s = np.linalg.lstsq(target.numpy(), source.numpy())
  #theta = torch.Tensor(X.T[:2, :])
  # torch.gels(B, A) solves A @ X = B; only the first 3 rows of X_ hold the solution
  X_, qr = torch.gels(source, target)
  theta = X_[:3, :2].transpose(1, 0)
  return theta
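If ported to a current PyTorch, the gels call above would become torch.linalg.lstsq with the argument order reversed; since the result is no longer padded with residual rows, the [:3] row slice becomes redundant. A sketch, not part of the AutoDL source:

X = torch.linalg.lstsq(target, source).solution   # shape (3, 3), no padding rows
theta = X[:, :2].transpose(1, 0)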

Example #6
Source File: networks.py    From CGIntrinsics with MIT License
def LinearScaleInvarianceFramework(self, prediction, gt, mask, w_grad):

        assert(prediction.size(1) == gt.size(1))
        assert(prediction.size(1) == mask.size(1))

        w_data = 1.0
        # w_grad = 0.5
        gt_vec = gt[mask > 0.1]
        pred_vec = prediction[mask > 0.1]
        gt_vec = gt_vec.unsqueeze(1).float().cpu()
        pred_vec = pred_vec.unsqueeze(1).float().cpu()

        # least-squares fit of a single scalar so that scale * prediction
        # best matches the ground truth
        scale, _ = torch.gels(gt_vec.data, pred_vec.data)
        scale = scale[0,0]

        # print("scale" , scale)
        # sys.exit()
        prediction_scaled = prediction * scale
        final_loss = w_data * self.L2Loss(prediction_scaled, mask, gt) 

        prediction_1 = prediction_scaled[:,:,::2,::2]
        prediction_2 = prediction_1[:,:,::2,::2]
        prediction_3 = prediction_2[:,:,::2,::2]

        mask_1 = mask[:,:,::2,::2]
        mask_2 = mask_1[:,:,::2,::2]
        mask_3 = mask_2[:,:,::2,::2]

        gt_1 = gt[:,:,::2,::2]
        gt_2 = gt_1[:,:,::2,::2]
        gt_3 = gt_2[:,:,::2,::2]

        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_scaled , mask, gt) 
        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_1, mask_1, gt_1)
        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_2, mask_2, gt_2)
        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_3, mask_3, gt_3)

        return final_loss 
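The scale fit in this example (and in Examples #7 and #8 below) is a one-unknown least-squares problem, so the gels call reduces to the closed form s = <pred, gt> / <pred, pred>. A quick sketch with made-up data:

import torch

pred = torch.rand(1000) + 0.1                  # stand-in predictions
gt = 2.5 * pred + 0.01 * torch.randn(1000)     # ground truth at scale ~2.5
s = torch.dot(pred, gt) / torch.dot(pred, pred)
print(s)                                        # close to 2.5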
Example #7
Source File: eval_utils.py    From SDPS-Net with MIT License
def calIntsAcc(gt_i, pred_i, data_batch=1):
    n, c, h, w = gt_i.shape
    pred_i  = pred_i.view(n, c, h, w)
    ref_int = gt_i[:, :3].repeat(1, gt_i.shape[1] // 3, 1, 1)
    gt_i  = gt_i / ref_int
    # torch.gels returns (solution, QR); the fitted scalar ratio is scale[0][0]
    scale = torch.gels(gt_i.view(-1, 1), pred_i.view(-1, 1))
    ints_ratio = (gt_i - scale[0][0] * pred_i).abs() / (gt_i + 1e-8)
    ints_error = torch.stack(ints_ratio.split(3, 1), 1).mean(2)
    return {'ints_ratio': ints_ratio.mean().item()}, ints_error.squeeze() 
Example #8
Source File: networks.py    From CGIntrinsics with MIT License
def WeightedLinearScaleInvarianceFramework(self, prediction, gt, mask, w_grad):
        w_data = 1.0

        assert(prediction.size(1) == gt.size(1))
        assert(prediction.size(1) == mask.size(1))

        if torch.sum(mask.data) < 10:
            return 0

        # w_grad = 0.5
        gt_vec = gt[mask > 0.1]
        pred_vec = prediction[mask > 0.1]
        gt_vec = gt_vec.unsqueeze(1).float().cpu()
        pred_vec = pred_vec.unsqueeze(1).float().cpu()

        # least-squares fit of a single scalar scale, as in Example #6
        scale, _ = torch.gels(gt_vec.data, pred_vec.data)
        scale = scale[0,0]

        prediction_scaled = prediction * scale
        
        # weight each pixel by min(1/gt, 1) to down-weight bright regions
        ones_matrix = Variable(torch.zeros(gt.size(0), gt.size(1), gt.size(2), gt.size(3)) + 1, requires_grad = False)
        weight = torch.min(1/gt,  ones_matrix.float().cuda())
        weight_mask = torch.mul(weight, mask)
        
        final_loss = w_data * self.L2Loss(prediction_scaled, weight_mask, gt)

        prediction_1 = prediction_scaled[:,:,::2,::2]
        prediction_2 = prediction_1[:,:,::2,::2]
        prediction_3 = prediction_2[:,:,::2,::2]

        mask_1 = weight_mask[:,:,::2,::2]
        mask_2 = mask_1[:,:,::2,::2]
        mask_3 = mask_2[:,:,::2,::2]

        gt_1 = gt[:,:,::2,::2]
        gt_2 = gt_1[:,:,::2,::2]
        gt_3 = gt_2[:,:,::2,::2]

        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_scaled , weight_mask, gt) 
        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_1, mask_1, gt_1)
        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_2, mask_2, gt_2)
        final_loss += w_grad * self.L1GradientMatchingLoss(prediction_3, mask_3, gt_3)

        return final_loss