Python torch.det() Examples

The following are 28 code examples of torch.det(), collected from open-source projects. You can go to the original project or source file by following the links above each example, or check out the other available functions/classes of the torch module.
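Before the project examples, here is a minimal, self-contained sketch of what torch.det() computes (the values are illustrative; any reasonably recent PyTorch behaves the same):

import torch

# Determinant of a single square matrix
A = torch.tensor([[1.0, 2.0],
                  [3.0, 4.0]])
print(torch.det(A))  # tensor(-2.), since 1*4 - 2*3 = -2

# torch.det() also works on batches of square matrices
B = torch.eye(3).repeat(4, 1, 1)
print(torch.det(B))  # tensor([1., 1., 1., 1.])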
Example #1
Source File: dice_pytorch.py    From DiCE with MIT License
def dpp_style(self, submethod):
        """Computes the DPP of a matrix."""
        det_entries = torch.ones((self.total_CFs, self.total_CFs))
        if submethod == "inverse_dist":
            for i in range(self.total_CFs):
                for j in range(self.total_CFs):
                    det_entries[(i,j)] = 1.0/(1.0 + self.compute_dist(self.cfs[i], self.cfs[j]))
                    if i == j:
                        det_entries[(i,j)] += 0.0001

        elif submethod == "exponential_dist":
            for i in range(self.total_CFs):
                for j in range(self.total_CFs):
                    det_entries[(i,j)] = 1.0/(torch.exp(self.compute_dist(self.cfs[i], self.cfs[j])))
                    if i == j:
                        det_entries[(i,j)] += 0.0001

        diversity_loss = torch.det(det_entries)
        return diversity_loss 
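A side note on why the determinant measures diversity here: the determinant of a similarity kernel shrinks as its rows become linearly dependent, i.e. as the points become redundant. A small illustration with a hypothetical helper (not part of DiCE):

import torch

def inverse_dist_kernel(points):
    # points: (n, d); K[i, j] = 1 / (1 + ||p_i - p_j||), plus the same small
    # diagonal jitter used in the example above
    K = 1.0 / (1.0 + torch.cdist(points, points))
    return K + 1e-4 * torch.eye(points.shape[0])

close = torch.tensor([[0.0, 0.0], [0.1, 0.0]])
far = torch.tensor([[0.0, 0.0], [5.0, 0.0]])
print(torch.det(inverse_dist_kernel(close)))  # small: nearly redundant points
print(torch.det(inverse_dist_kernel(far)))    # close to 1: diverse points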
Example #2
Source File: loss.py    From 6-PACK with MIT License
def estimate_pose(self, pt0, pt1):
        pconf2 = self.pconf.view(1, self.num_key, 1)
        cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
        cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()

        diag_mat = torch.diag(self.pconf).unsqueeze(0)
        x = (pt0 - cent0).transpose(2, 1).contiguous()
        y = pt1 - cent1

        pred_t = cent1 - cent0

        cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)

        u, _, v = torch.svd(cov)

        u = u.transpose(1, 0).contiguous()
        d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
        u = u.transpose(1, 0).contiguous().unsqueeze(0)

        ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
        v = v.transpose(1, 0).contiguous().unsqueeze(0)

        pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
        return pred_r, pred_t[:, 0, :].view(1, 3) 
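The torch.det() call above is the standard reflection guard from the Kabsch / orthogonal Procrustes algorithm: det(VUᵀ) is +1 for a rotation and -1 for a reflection. A minimal unbatched sketch of the same idea (a hypothetical helper, not the 6-PACK code):

import torch

def kabsch(P, Q):
    # P, Q: (n, 3) corresponding points; returns R, t with Q ~ P @ R.t() + t
    P0 = P - P.mean(dim=0)
    Q0 = Q - Q.mean(dim=0)
    U, _, V = torch.svd(P0.t() @ Q0)  # SVD of the covariance H = P0^T @ Q0
    S = torch.eye(3)
    S[2, 2] = torch.det(V @ U.t())    # -1 would indicate a reflection
    R = V @ S @ U.t()                 # proper rotation, det(R) = +1
    t = Q.mean(dim=0) - R @ P.mean(dim=0)
    return R, t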
Example #3
Source File: glow.py    From waveglow with BSD 3-Clause "New" or "Revised" License
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:,0] = -1*W[:,0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
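A quick illustrative check of why the column flip works: QR of a Gaussian matrix yields an orthogonal W whose determinant is ±1, and negating one column flips the sign of det(W) without breaking orthogonality:

import torch

c = 8
W = torch.qr(torch.randn(c, c))[0]  # random orthogonal matrix
if torch.det(W) < 0:
    W[:, 0] = -W[:, 0]              # negating a column negates det(W)
print(torch.det(W))                 # ~1.0
print(torch.allclose(W @ W.t(), torch.eye(c), atol=1e-5))  # still orthogonal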
Example #4
Source File: utils.py    From vidreid_cosegmentation with Apache License 2.0
def get_gaussian_kernel(channels, kernel_size=5, mean=0, sigma=[1, 4]):
    # CONVERT INTO NP ARRAY
    sigma_ = torch.zeros((2, 2)).float()
    sigma_[0, 0] = sigma[0]
    sigma_[1, 1] = sigma[1]
    sigma = sigma_

    # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2)
    x_cord = torch.linspace(-1, 1, kernel_size)
    x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
    y_grid = x_grid.t()
    xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()

    variance = (sigma @ sigma.t()).float()
    inv_variance = torch.inverse(variance)

    # Calculate the 2-dimensional gaussian kernel which is
    # the product of two gaussian distributions for two different
    # variables (in this case called x and y)
    gaussian_kernel = (1.0 / (2.0 * math.pi * torch.det(variance))) * torch.exp(
        -torch.sum(
            ((xy_grid - mean) @ inv_variance.unsqueeze(0)) * (xy_grid - mean), dim=-1
        )
        / 2
    )

    # Make sure sum of values in gaussian kernel equals 1.
    gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)

    # Reshape to 2d depthwise convolutional weight
    gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
    gaussian_kernel = gaussian_kernel.repeat(1, channels, 1, 1)
    return gaussian_kernel 
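One caveat worth flagging: the exact 2-D Gaussian density divides by 2π·sqrt(det(variance)), not by 2π·det(variance) as above; since the difference is a constant factor, the final renormalization to sum 1 absorbs it. An illustrative reference value via torch.distributions:

import torch

variance = torch.diag(torch.tensor([1.0, 16.0]))  # sigma @ sigma.t() from above
mvn = torch.distributions.MultivariateNormal(torch.zeros(2),
                                             covariance_matrix=variance)
print(mvn.log_prob(torch.tensor([0.5, -0.5])).exp())  # exact density at a point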
Example #5
Source File: glow.py    From Tacotron2-Mandarin with MIT License
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:, 0] = -1 * W[:, 0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #6
Source File: smpl_param_regressor.py    From GraphCMR with BSD 3-Clause "New" or "Revised" License
def forward(self, x):
        """Forward pass.
        Input:
            x: size = (B, 1723*6)
        Returns:
            SMPL pose parameters as rotation matrices: size = (B,24,3,3)
            SMPL shape parameters: size = (B,10)
        """
        batch_size = x.shape[0]
        x = x.view(batch_size, -1)
        x = self.layers(x)
        rotmat = x[:, :24*3*3].view(-1, 24, 3, 3).contiguous()
        betas = x[:, 24*3*3:].contiguous()
        rotmat = rotmat.view(-1, 3, 3).contiguous()
        orig_device = rotmat.device
        if self.use_cpu_svd:
            rotmat = rotmat.cpu()
        U, S, V = batch_svd(rotmat)

        rotmat = torch.matmul(U, V.transpose(1,2))
        det = torch.zeros(rotmat.shape[0], 1, 1).to(rotmat.device)
        with torch.no_grad():
            for i in range(rotmat.shape[0]):
                det[i] = torch.det(rotmat[i])
        rotmat = rotmat * det
        rotmat = rotmat.view(batch_size, 24, 3, 3)
        rotmat = rotmat.to(orig_device)
        return rotmat, betas 
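Note that when det = -1, multiplying the whole matrix by det (as above) negates every column; the more common projection onto SO(3) flips only the last singular direction. A hedged standalone sketch of that variant:

import torch

def project_to_so3(M):
    # nearest rotation to a 3x3 matrix M, with the usual det-based sign fix
    U, _, V = torch.svd(M)
    S = torch.eye(3)
    S[2, 2] = torch.det(U @ V.t())
    return U @ S @ V.t()

R = project_to_so3(torch.randn(3, 3))
print(torch.det(R))  # ~1.0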
Example #7
Source File: glow.py    From fac-via-ppg with Apache License 2.0
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:,0] = -1*W[:,0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #8
Source File: glow.py    From ffjord with MIT License
def _logdetgrad(self):
        return torch.log(torch.abs(torch.det(self.weight.double()))).float() 
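The .double() cast above mitigates precision loss, but det() itself can still overflow for large matrices before the log is taken. torch.slogdet returns log|det| directly and avoids forming the determinant at all. An illustrative comparison:

import torch

W = torch.randn(100, 100)
print(torch.det(W))      # typically inf or -inf: overflows float32 at this size
print(torch.slogdet(W))  # finite log|det| plus a sign in {-1, +1}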
Example #9
Source File: arbitraryRotate.py    From NeuralRG with Apache License 2.0
def forward(self,z):
        w_ = torch.inverse(self.w)
        forwardLogjac = torch.log(torch.abs(torch.det(w_)))*z.shape[-1]*z.shape[-2]*torch.ones(z.shape[0])
        print(w_)

        z = torch.matmul(z.permute([0,2,3,1]),w_.reshape(1,1,*w_.shape)).permute(0,3,1,2)
        return z,forwardLogjac 
Example #10
Source File: arbitraryRotate.py    From NeuralRG with Apache License 2.0
def inverse(self,y):
        inverseLogjac = torch.log(torch.abs(torch.det(self.w)))*y.shape[-1]*y.shape[-2]*torch.ones(y.shape[0])
        print(self.w)

        y = torch.matmul(y.permute([0,2,3,1]),self.w.reshape(1,1,*self.w.shape)).permute(0,3,1,2)
        return y,inverseLogjac 
Example #11
Source File: glow.py    From FastSpeech with MIT License
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:, 0] = -1*W[:, 0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #12
Source File: glow.py    From FastSpeech with MIT License
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:,0] = -1*W[:,0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #13
Source File: echo_state_network.py    From pytorch-esn with MIT License
def fit(self):
        if self.readout_training in {'gd', 'svd'}:
            return

        if self.readout_training == 'cholesky':
            W = torch.solve(self.XTy,
                           self.XTX + self.lambda_reg * torch.eye(
                               self.XTX.size(0), device=self.XTX.device))[0].t()
            self.XTX = None
            self.XTy = None

            self.readout.bias = nn.Parameter(W[:, 0])
            self.readout.weight = nn.Parameter(W[:, 1:])
        elif self.readout_training == 'inv':
            I = (self.lambda_reg * torch.eye(self.XTX.size(0))).to(
                self.XTX.device)
            A = self.XTX + I

            if torch.det(A) != 0:
                W = torch.mm(torch.inverse(A), self.XTy).t()
            else:
                pinv = torch.pinverse(A)
                W = torch.mm(pinv, self.XTy).t()

            self.readout.bias = nn.Parameter(W[:, 0])
            self.readout.weight = nn.Parameter(W[:, 1:])

            self.XTX = None
            self.XTy = None 
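A hedged alternative to the det(A) != 0 test above, which is brittle in floating point (a matrix can be numerically singular while its determinant is nonzero): attempt the solve and fall back to the pseudo-inverse on failure. A sketch assuming a PyTorch recent enough to ship torch.linalg:

import torch

def regularized_solve(A, b):
    try:
        return torch.linalg.solve(A, b)
    except RuntimeError:                # raised when A is (numerically) singular
        return torch.linalg.pinv(A) @ b

A = torch.eye(3)
b = torch.ones(3, 1)
print(regularized_solve(A, b))          # [[1.], [1.], [1.]]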
Example #14
Source File: waveglow.py    From NeMo with Apache License 2.0
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:, 0] = -1 * W[:, 0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #15
Source File: basic_utils.py    From PVN3D with MIT License
def best_fit_transform_torch(self, A, B):
        '''
        Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
        Input:
            A: Nxm numpy array of corresponding points, usually points on mdl
            B: Nxm numpy array of corresponding points, usually points on camera axis
        Returns:
        T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
        R: mxm rotation matrix
        t: mx1 translation vector
        '''
        assert A.size() == B.size()
        # get number of dimensions
        m = A.size()[1]
        # translate points to their centroids
        centroid_A = torch.mean(A, dim=0)
        centroid_B = torch.mean(B, dim=0)
        AA = A - centroid_A
        BB = B - centroid_B
        # rotation matrix
        H = torch.mm(AA.transpose(1, 0), BB)
        U, S, Vt = torch.svd(H)
        R = torch.mm(Vt.transpose(1, 0), U.transpose(1, 0))
        # special reflection case
        if torch.det(R) < 0:
            Vt[m-1, :] *= -1
            R = torch.mm(Vt.transpose(1, 0), U.transpose(1, 0))
        # translation
        t = centroid_B - torch.mm(R, centroid_A.view(3, 1))[:, 0]
        T = torch.zeros(3, 4).cuda()
        T[:, :3] = R
        T[:, 3] = t
        return T
Example #16
Source File: basic_utils.py    From PVN3D with MIT License
def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
    Input:
        A: Nxm numpy array of corresponding points, usually points on mdl
        B: Nxm numpy array of corresponding points, usually points on camera axis
    Returns:
    T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
    R: mxm rotation matrix
    t: mx1 translation vector
    '''

    assert A.shape == B.shape
    # get number of dimensions
    m = A.shape[1]
    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B
    # rotation matrix
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    # special reflection case
    if np.linalg.det(R) < 0:
        Vt[m-1, :] *= -1
        R = np.dot(Vt.T, U.T)
    # translation
    t = centroid_B.T - np.dot(R, centroid_A.T)
    T = np.zeros((3, 4))
    T[:, :3] = R
    T[:, 3] = t
    return T
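An illustrative sanity check for the NumPy version above (the transform values here are arbitrary): recover a known rotation and translation.

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(10, 3))
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([1.0, -2.0, 0.5])
B = A @ R_true.T + t_true
T = best_fit_transform(A, B)
print(np.allclose(T[:, :3], R_true))  # True
print(np.allclose(T[:, 3], t_true))   # True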
Example #17
Source File: glow.py    From residual-flows with MIT License
def _logdetgrad(self):
        return torch.log(torch.abs(torch.det(self.weight))) 
Example #18
Source File: glow.py    From LightSpeech with MIT License
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:,0] = -1*W[:,0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #19
Source File: utils_torch_filter.py    From ai-imu-dr with MIT License
def normalize_rot(rot):
        # U, S, V = torch.svd(A) returns the singular value
        # decomposition of a real matrix A of size (n x m) such that A = USV′.
        # Irrespective of the original strides, the returned matrix U will
        # be transposed, i.e. with strides (1, n) instead of (n, 1).

        # pytorch SVD seems to be inaccurate, so just move to numpy immediately
        U, _, V = torch.svd(rot)
        S = torch.eye(3).double()
        S[2, 2] = torch.det(U) * torch.det(V)
        return U.mm(S).mm(V.t()) 
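Illustrative usage (assuming normalize_rot is in scope as defined above): perturb a rotation and project it back onto a proper rotation matrix.

import torch

noisy = torch.eye(3).double() + 0.05 * torch.randn(3, 3).double()
R = normalize_rot(noisy)
print(torch.det(R))  # ~1.0
print(torch.allclose(R @ R.t(), torch.eye(3).double(), atol=1e-6))  # orthogonal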
Example #20
Source File: rot6d.py    From manopth with GNU General Public License v3.0
def robust_compute_rotation_matrix_from_ortho6d(poses):
    """
    Instead of making 2nd vector orthogonal to first
    create a base that takes into account the two predicted
    directions equally
    """
    x_raw = poses[:, 0:3]  # batch*3
    y_raw = poses[:, 3:6]  # batch*3

    x = normalize_vector(x_raw)  # batch*3
    y = normalize_vector(y_raw)  # batch*3
    middle = normalize_vector(x + y)
    orthmid = normalize_vector(x - y)
    x = normalize_vector(middle + orthmid)
    y = normalize_vector(middle - orthmid)
    # Their scalar product should be small !
    # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001
    z = normalize_vector(cross_product(x, y))

    x = x.view(-1, 3, 1)
    y = y.view(-1, 3, 1)
    z = z.view(-1, 3, 1)
    matrix = torch.cat((x, y, z), 2)  # batch*3*3
    # Check for reflection in matrix ! If found, flip last vector TODO
    assert (torch.stack([torch.det(mat) for mat in matrix]) < 0).sum() == 0
    return matrix 
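For contrast with the symmetric scheme above, here is a minimal Gram-Schmidt variant of the 6D rotation representation using only torch built-ins (a hypothetical helper, not manopth's normalize_vector/cross_product). Because the frame is built from cross products it is always right-handed, which is why the det check above should only ever trip on numerical error:

import torch
import torch.nn.functional as F

def ortho6d_to_matrix(poses):
    # poses: (batch, 6) -> (batch, 3, 3) rotation matrices
    x = F.normalize(poses[:, 0:3], dim=1)
    z = F.normalize(torch.cross(x, poses[:, 3:6], dim=1), dim=1)
    y = torch.cross(z, x, dim=1)
    return torch.stack((x, y, z), dim=2)

R = ortho6d_to_matrix(torch.randn(5, 6))
print(torch.det(R))  # all ~1.0: a right-handed orthonormal frame is never a reflection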
Example #21
Source File: framework.py    From FrEIA with MIT License
def log_jacobian_numerical(self, x, c=None, rev=False, h=1e-04):
        '''Approximate log Jacobian determinant via finite differences.'''
        if isinstance(x, (list, tuple)):
            batch_size = x[0].shape[0]
            ndim_x_separate = [np.prod(x_i.shape[1:]) for x_i in x]
            ndim_x_total = sum(ndim_x_separate)
            x_flat = torch.cat([x_i.view(batch_size, -1) for x_i in x], dim=1)
        else:
            batch_size = x.shape[0]
            ndim_x_total = np.prod(x.shape[1:])
            x_flat = x.reshape(batch_size, -1)

        J_num = torch.zeros(batch_size, ndim_x_total, ndim_x_total)
        for i in range(ndim_x_total):
            offset = x[0].new_zeros(batch_size, ndim_x_total)
            offset[:,i] = h
            if isinstance(x, (list, tuple)):
                x_upper = torch.split(x_flat + offset, ndim_x_separate, dim=1)
                x_upper = [x_upper[i].view(*x[i].shape) for i in range(len(x))]
                x_lower = torch.split(x_flat - offset, ndim_x_separate, dim=1)
                x_lower = [x_lower[i].view(*x[i].shape) for i in range(len(x))]
            else:
                x_upper = (x_flat + offset).view(*x.shape)
                x_lower = (x_flat - offset).view(*x.shape)
            y_upper = self.forward(x_upper, c=c)
            y_lower = self.forward(x_lower, c=c)
            if isinstance(y_upper, (list, tuple)):
                y_upper = torch.cat([y_i.view(batch_size, -1) for y_i in y_upper], dim=1)
                y_lower = torch.cat([y_i.view(batch_size, -1) for y_i in y_lower], dim=1)
            J_num[:,:,i] = (y_upper - y_lower).view(batch_size, -1) / (2*h)
        logdet_num = x[0].new_zeros(batch_size)
        for i in range(batch_size):
            logdet_num[i] = torch.det(J_num[i,:,:]).abs().log()

        return logdet_num 
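An illustrative cross-check for small dimensions: torch.autograd.functional.jacobian gives the exact Jacobian, and torch.slogdet its log-determinant (the map f below is a hypothetical elementwise bijection, not a FrEIA module):

import torch

f = lambda x: torch.tanh(x) + x               # strictly increasing, hence invertible
x = torch.randn(4)
J = torch.autograd.functional.jacobian(f, x)  # exact 4x4 Jacobian
print(torch.slogdet(J)[1])
# for an elementwise map this is simply sum(log f'(x)), with f'(x) = 2 - tanh(x)^2
print(torch.log(2.0 - torch.tanh(x) ** 2).sum())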
Example #22
Source File: module.py    From pytorch-glow with MIT License
def forward(self, x, logdet=None, reverse=False):
        """

        :param x: input
        :type x: torch.Tensor
        :param logdet: log determinant
        :type logdet:
        :param reverse: whether to reverse bias
        :type reverse: bool
        :return: output and logdet
        :rtype: tuple(torch.Tensor, torch.Tensor)
        """
        logdet_factor = ops.count_pixels(x)  # H * W
        dlogdet = torch.log(torch.abs(torch.det(self.weight))) * logdet_factor
        if not reverse:
            weight = self.weight.view(*self.weight.shape, 1, 1)
            z = F.conv2d(x, weight)
            if logdet is not None:
                logdet = logdet + dlogdet
            return z, logdet
        else:
            weight = self.weight.inverse().view(*self.weight.shape, 1, 1)
            z = F.conv2d(x, weight)
            if logdet is not None:
                logdet = logdet - dlogdet
            return z, logdet 
Example #23
Source File: loss.py    From 6-PACK with MIT License
def estimate_rotation(self, pt0, pt1, sym_or_not):
        pconf2 = self.pconf.view(1, self.num_key, 1)
        cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
        cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()

        diag_mat = torch.diag(self.pconf).unsqueeze(0)
        x = (pt0 - cent0).transpose(2, 1).contiguous()
        y = pt1 - cent1

        pred_t = cent1 - cent0

        cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)

        u, _, v = torch.svd(cov)

        u = u.transpose(1, 0).contiguous()
        d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
        u = u.transpose(1, 0).contiguous().unsqueeze(0)

        ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
        v = v.transpose(1, 0).contiguous().unsqueeze(0)

        pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()

        if sym_or_not:
            pred_r = torch.bmm(pred_r, self.sym_axis).contiguous().view(-1).contiguous()

        return pred_r 
Example #24
Source File: glow.py    From tn2-wg with BSD 3-Clause "New" or "Revised" License
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:,0] = -1*W[:,0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Example #25
Source File: glow_msc.py    From pde-surrogate with MIT License
def forward(self, y, x, return_eps=False):
        """ p(y|x) where (x, y) are pair of input & output
        y --> z, evaluate det(dz/dy) and p(z|x) --> p(y|x)

        Args:
            y (Tensor): output
            x (Tensor): input

        Returns:
            z, logp(y|x), eps_list (None if return_eps is False)
        """
        logdet = 0.
        # list of conditioning features at different scales, and conditional prior
        conditions, cond_prior = self.encoder(x)
        eps_list = []
        for i, module in enumerate(self.flow._modules.values()):
            if i == 0:
                # first revblock, no squeeze and split
                y, dlogdet = module(y, conditions[i])
            elif i == len(self.flow_blocks) - 1:
                # last revblock, top latent
                y, dlogdet, _ = module(y, conditions[i])
                log_prior = cond_prior.log_prob(y)
                if return_eps:
                    eps = (y - cond_prior.mean) / cond_prior.log_stddev.exp()
                    eps_list.append(eps)
                logdet = logdet + log_prior
            else:
                # middle revblocks, squeeze and split latent
                y, dlogdet, eps = module(y, conditions[i], return_eps=return_eps)
                if return_eps:
                    eps_list.append(eps)
            logdet = logdet + dlogdet
        # y is actually z, latent
        if return_eps:
            return y, logdet, eps_list
        else:
            return y, logdet, None 
Example #26
Source File: glow_msc.py    From pde-surrogate with MIT License
def log_determinant(self, x, W):
        h, w = x.shape[2:]
        det = torch.det(W.to(torch.float64)).to(torch.float32)
        if det.item() == 0:
            det += 1e-6
        return h * w * det.abs().log() 
Example #27
Source File: modules.py    From waveglow with Apache License 2.0
def get_weight(self, input, reverse):
        w_shape = self.w_shape
        dlogdet = torch.log(torch.abs(torch.det(self.weight))) * input.size(-1)
        if not reverse:
            weight = self.weight.view(w_shape[0], w_shape[1], 1)
        else:
            weight = torch.inverse(self.weight).view(w_shape[0], w_shape[1], 1)
        return weight, dlogdet 
Example #28
Source File: evaluation_matrix.py    From DSD-SATN with Apache License 2.0
def p_mpjpe(predicted, target, with_sRt=False, full_torch=False, with_aligned=False, each_separate=False):
    """
    Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
    often referred to as "Protocol #2" in many papers.
    """
    assert predicted.shape == target.shape

    muX = np.mean(target, axis=1, keepdims=True)
    muY = np.mean(predicted, axis=1, keepdims=True)

    X0 = target - muX
    Y0 = predicted - muY
    '''
    if (X0**2<1e-10).sum()>0 or (X0**2>1e10).sum()>0:
        print('Error !')
        print(X0[X0**2<1e-10],X0[X0**2>1e10])
        print(predicted[X0**2<1e-10],predicted[X0**2>1e10])
        return 1.,(np.ones(3),np.ones((3,3)),np.ones(3))
    '''
    normX = np.sqrt(np.sum(X0**2, axis=(1, 2), keepdims=True))
    normY = np.sqrt(np.sum(Y0**2, axis=(1, 2), keepdims=True))

    X0 /= (normX+1e-6)
    Y0 /= (normY+1e-6)


    H = np.matmul(X0.transpose(0, 2, 1), Y0).astype(np.float16).astype(np.float64)
    U, s, Vt = np.linalg.svd(H)
    V = Vt.transpose(0, 2, 1)
    R = np.matmul(V, U.transpose(0, 2, 1))

    # Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
    sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
    V[:, :, -1] *= sign_detR
    s[:, -1] *= sign_detR.flatten()
    R = np.matmul(V, U.transpose(0, 2, 1)) # Rotation

    tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)

    a = tr * normX / normY # Scale
    t = muX - a*np.matmul(muY, R) # Translation

    # Perform rigid transformation on the input
    predicted_aligned = a*np.matmul(predicted, R) + t

    if with_sRt and not with_aligned:
        return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1)),(a,R,t)
    if with_aligned:
        return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1)),(a,R,t),predicted_aligned
    if each_separate:
        return np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1)
    # Return MPJPE
    return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1))
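An illustrative sanity check (assuming p_mpjpe is in scope): the error for a prediction that is an exactly scaled, rotated, and translated copy of the target should be near zero, limited only by the float16 cast the function applies to H.

import numpy as np

target = np.random.default_rng(1).normal(size=(2, 17, 3))
theta = 0.5
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
predicted = 1.3 * target @ R + 0.7
print(p_mpjpe(predicted, target))  # small, not exactly 0, due to the float16 cast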