Python torch.svd() Examples

The following are 30 code examples of torch.svd(), collected from open-source projects. You can go to the original project or source file by following the attribution above each example, or check out all other available functions and classes of the torch module.
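
As a quick refresher on the call signature: torch.svd(A) returns a tuple (U, S, V) such that A ≈ U @ diag(S) @ V.t(). Unlike numpy.linalg.svd, the third return value is V itself rather than its transpose; in recent PyTorch releases, torch.linalg.svd (which returns Vh = V.t()) is the recommended replacement. A minimal, self-contained sketch:

import torch

A = torch.randn(5, 3, dtype=torch.float64)
U, S, V = torch.svd(A)                        # U: (5, 3), S: (3,), V: (3, 3)

# torch.svd returns V (not V^T), so the reconstruction transposes it explicitly
A_reconstructed = U @ torch.diag(S) @ V.t()
print(torch.allclose(A, A_reconstructed))     # True up to floating-point error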
Example #1
Source File: test_manifold_basic.py    From geoopt with Apache License 2.0
def euclidean_stiefel_case():
    torch.manual_seed(42)
    shape = manifold_shapes[geoopt.manifolds.EuclideanStiefel]
    ex = torch.randn(*shape, dtype=torch.float64)
    ev = torch.randn(*shape, dtype=torch.float64)
    u, _, v = torch.svd(ex)
    x = u @ v.t()
    nonsym = x.t() @ ev
    v = ev - x @ (nonsym + nonsym.t()) / 2

    manifold = geoopt.manifolds.EuclideanStiefel()
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case
    manifold = geoopt.manifolds.EuclideanStiefelExact()
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case 
Example #2
Source File: regularizations.py    From incremental_learning.pytorch with MIT License
def spectral_restricted_isometry_property_regularization(weights, config):
    """Requires that every set of columns of the weights, with cardinality no
    larger than k, shall behave like an orthogonal system.

    Also called SRIP.

    References:
        * Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?
          Bansal et al.
          NeurIPS 2018

    :param weights: Learned parameters of shape (n_classes, n_features).
    :return: A float scalar loss.
    """
    wTw = torch.mm(weights.t(), weights)
    x = wTw - torch.eye(wTw.shape[0]).to(weights.device)

    _, s, _ = torch.svd(x)

    loss = s[0]
    return config["factor"] * loss 
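
A hedged usage sketch of the regularizer above; the layer shape and the factor value are illustrative assumptions, not part of the original project:

import torch
import torch.nn as nn

layer = nn.Linear(in_features=64, out_features=10, bias=False)   # weight ~ (n_classes, n_features)
config = {"factor": 1e-4}                                        # assumed regularization strength

# SRIP penalty on the classifier weights, to be added to the task loss
srip_loss = spectral_restricted_isometry_property_regularization(layer.weight, config)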
Example #3
Source File: atom.py    From pytracking with GNU General Public License v3.0
def init_projection_matrix(self, x):
        # Set if using projection matrix
        self.params.use_projection_matrix = self.params.get('use_projection_matrix', True)

        if self.params.use_projection_matrix:
            self.compressed_dim = self.fparams.attribute('compressed_dim', None)

            proj_init_method = self.params.get('proj_init_method', 'pca')
            if proj_init_method == 'pca':
                x_mat = TensorList([e.permute(1, 0, 2, 3).reshape(e.shape[1], -1).clone() for e in x])
                x_mat -= x_mat.mean(dim=1, keepdim=True)
                cov_x = x_mat @ x_mat.t()
                self.projection_matrix = TensorList(
                    [None if cdim is None else torch.svd(C)[0][:, :cdim].t().unsqueeze(-1).unsqueeze(-1).clone() for C, cdim in
                     zip(cov_x, self.compressed_dim)])
            elif proj_init_method == 'randn':
                self.projection_matrix = TensorList(
                    [None if cdim is None else ex.new_zeros(cdim,ex.shape[1],1,1).normal_(0,1/math.sqrt(ex.shape[1])) for ex, cdim in
                     zip(x, self.compressed_dim)])
        else:
            self.compressed_dim = x.size(1)
            self.projection_matrix = TensorList([None]*len(x)) 
Example #4
Source File: utils.py    From LinearStyleTransfer with BSD 2-Clause "Simplified" License
def whiten(cF):
    cFSize = cF.size()
    c_mean = torch.mean(cF,1) # c x (h x w)
    c_mean = c_mean.unsqueeze(1).expand_as(cF)
    cF = cF - c_mean

    contentConv = torch.mm(cF,cF.t()).div(cFSize[1]-1) + torch.eye(cFSize[0]).double()
    c_u,c_e,c_v = torch.svd(contentConv,some=False)

    k_c = cFSize[0]
    for i in range(cFSize[0]):
        if c_e[i] < 0.00001:
            k_c = i
            break

    c_d = (c_e[0:k_c]).pow(-0.5)
    step1 = torch.mm(c_v[:,0:k_c],torch.diag(c_d))
    step2 = torch.mm(step1,(c_v[:,0:k_c].t()))
    whiten_cF = torch.mm(step2,cF)
    return whiten_cF 
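
This is the whitening half of WCT: the channel covariance (plus an identity term for numerical stability) is symmetric positive semi-definite, so torch.svd here plays the role of an eigendecomposition, and multiplying by c_v @ diag(c_d) @ c_v.t() decorrelates the channels. A small usage sketch, assuming the (channels, h*w) layout and double precision implied by the code above:

import torch

c, hw = 8, 256
feat = torch.randn(c, hw).double()   # flattened feature map, channels x (h*w)
white = whiten(feat)
print(white.shape)                   # torch.Size([8, 256])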
Example #5
Source File: _base.py    From liegroups with MIT License
def _normalize_one(self, mat):
        # U, S, V = torch.svd(A) returns the singular value
        # decomposition of a real matrix A of size (n x m) such that A = USV′.
        # Irrespective of the original strides, the returned matrix U will
        # be transposed, i.e. with strides (1, n) instead of (n, 1).

        # pytorch has native SVD function but not determinant...
        # U, _, V = mat.squeeze().svd()
        # S = torch.eye(self.dim)
        # if U.is_cuda:
        #     S = S.cuda()
        # S[self.dim - 1, self.dim - 1] = float(np.linalg.det(U.cpu().numpy()) *
        #                                       np.linalg.det(V.cpu().numpy()))
        # mat_normalized = U.mm(S).mm(V.t_())

        # pytorch SVD seems to be inaccurate, so just move to numpy immediately
        mat_cpu = mat.detach().cpu().numpy().squeeze()
        U, _, V = np.linalg.svd(mat_cpu, full_matrices=False)
        S = np.eye(self.dim)
        S[self.dim - 1, self.dim - 1] = np.linalg.det(U) * np.linalg.det(V)

        mat_normalized = mat.__class__(U.dot(S).dot(V))

        mat.copy_(mat_normalized)
        return mat 
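
The determinant correction above ensures the result is a proper rotation (det = +1) rather than an arbitrary orthogonal matrix. A standalone sketch of the same projection, independent of the liegroups class (the helper name is hypothetical):

import numpy as np

def project_to_rotation(mat):
    # Nearest rotation matrix in the Frobenius sense, with det = +1 enforced
    U, _, Vt = np.linalg.svd(mat)
    S = np.eye(3)
    S[2, 2] = np.linalg.det(U) * np.linalg.det(Vt)   # flip the last axis if a reflection crept in
    return U @ S @ Vt

noisy = np.eye(3) + 0.05 * np.random.randn(3, 3)
R = project_to_rotation(noisy)
print(np.allclose(R @ R.T, np.eye(3)), np.isclose(np.linalg.det(R), 1.0))   # True True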
Example #6
Source File: utils.py    From pyvot with MIT License
def rigid_transform_3d_pytorch(p1, p2):
    center_p1 = torch.mean(p1, dim=0, keepdim=True)
    center_p2 = torch.mean(p2, dim=0, keepdim=True)

    pp1 = p1 - center_p1
    pp2 = p2 - center_p2

    h = torch.mm(pp1.t(), pp2)
    u, _, v = torch.svd(h)
    r = torch.mm(v.t(), u.t())

    # reflection
    if np.linalg.det(r.cpu().numpy()) < 0:
        v[2, :] *= -1
        r = torch.mm(v.t(), u.t())

    t = torch.mm(-r, center_p1.t()) + center_p2.t()

    return r, t 
Example #7
Source File: utils.py    From pyvot with MIT License
def rigid_transform_3d_numpy(p1, p2):
    center_p1 = np.mean(p1, axis=0, keepdims=True)
    center_p2 = np.mean(p2, axis=0, keepdims=True)

    pp1 = p1 - center_p1
    pp2 = p2 - center_p2

    h = np.matmul(pp1.T, pp2)
    u, _, v = np.linalg.svd(h)
    r = np.matmul(v.T, u.T)

    # reflection
    if np.linalg.det(r) < 0:
        v[2, :] *= -1
        r = np.matmul(v.T, u.T)

    t = np.matmul(-r, center_p1.T) + center_p2.T

    return r, t 
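
When comparing the PyTorch and NumPy versions above, keep in mind that the two APIs use different conventions: np.linalg.svd returns V^T while torch.svd returns V, so code ported between them needs a transpose in one place where the other has none. A small illustration:

import numpy as np
import torch

A = np.random.randn(4, 3)
U_np, S_np, Vt_np = np.linalg.svd(A, full_matrices=False)   # NumPy returns V^T
U_t, S_t, V_t = torch.svd(torch.from_numpy(A))              # torch.svd returns V

# Both reconstruct A, but note where the transpose goes
A_np = U_np @ np.diag(S_np) @ Vt_np
A_t = U_t @ torch.diag(S_t) @ V_t.t()
print(np.allclose(A, A_np), torch.allclose(torch.from_numpy(A), A_t))   # True True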
Example #8
Source File: orthogonal.py    From FrEIA with MIT License
def __init__(self, dims_in, correction_interval=256, clamp=5.):
        super().__init__()
        self.width = dims_in[0][0]
        self.clamp = clamp

        self.correction_interval = correction_interval
        self.back_counter = np.random.randint(0, correction_interval) // 2

        self.weights = torch.randn(self.width, self.width)
        self.weights = self.weights + self.weights.t()
        self.weights, S, V = torch.svd(self.weights)

        self.weights = nn.Parameter(self.weights)

        self.bias = nn.Parameter(0.05 * torch.randn(self.width))
        self.scaling = nn.Parameter(0.02 * torch.randn(self.width))

        self.register_backward_hook(correct_weights) 
Example #9
Source File: loss.py    From 6-PACK with MIT License
def estimate_pose(self, pt0, pt1):
        pconf2 = self.pconf.view(1, self.num_key, 1)
        cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
        cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()

        diag_mat = torch.diag(self.pconf).unsqueeze(0)
        x = (pt0 - cent0).transpose(2, 1).contiguous()
        y = pt1 - cent1

        pred_t = cent1 - cent0

        cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)

        u, _, v = torch.svd(cov)

        u = u.transpose(1, 0).contiguous()
        d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
        u = u.transpose(1, 0).contiguous().unsqueeze(0)

        ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
        v = v.transpose(1, 0).contiguous().unsqueeze(0)

        pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
        return pred_r, pred_t[:, 0, :].view(1, 3) 
Example #10
Source File: make_datasets.py    From nice_pytorch with BSD 3-Clause "New" or "Revised" License
def zca_matrix(data_tensor):
    """
    Helper function: compute ZCA whitening matrix across a dataset ~ (N, C, H, W).
    """
    # 1. flatten dataset:
    X = data_tensor.view(data_tensor.shape[0], -1)
    
    # 2. zero-center the matrix:
    X = rescale(X, -1., 1.)
    
    # 3. compute covariances:
    cov = torch.t(X) @ X

    # 4. compute ZCA(X) == U @ (diag(1/S)) @ torch.t(V) where U, S, V = SVD(cov):
    U, S, V = torch.svd(cov)
    return (U @ torch.diag(torch.reciprocal(S)) @ torch.t(V)) 
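
A hedged, self-contained sketch of the same computation; the rescale() helper used above is project-specific and not shown here, so simple mean-centering stands in for it, and the toy dataset shape is an assumption:

import torch

data = torch.rand(512, 3, 8, 8)                  # toy dataset ~ (N, C, H, W)
X = data.view(data.shape[0], -1)
X = X - X.mean(dim=0, keepdim=True)              # stand-in for the project's rescale() helper
cov = torch.t(X) @ X
U, S, V = torch.svd(cov)
W_zca = U @ torch.diag(torch.reciprocal(S)) @ torch.t(V)
whitened = X @ W_zca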
Example #11
Source File: utils_regularizers.py    From KAIR with MIT License
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # Applies regularization to the training by performing the
    # orthogonalization technique described in the paper
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies regularizer_orth2() to every layer of the model.
    # usage: net.apply(regularizer_orth2)
    # ----------------------------------------
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        w = w.permute(2, 3, 1, 0).contiguous().view(f1*f2*c_in, c_out)
        u, s, v = torch.svd(w)
        s_mean = s.mean()
        s[s > 1.5*s_mean] = s[s > 1.5*s_mean] - 1e-4
        s[s < 0.5*s_mean] = s[s < 0.5*s_mean] + 1e-4
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1)  # .type(dtype)
    else:
        pass 
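
Following the usage note in the docstring, the regularizer is applied to every layer of a model with Module.apply(); the network below is purely illustrative:

import torch
import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 3, 3, padding=1),
)
net.apply(regularizer_orth2)   # nudges each Conv layer's singular values toward their mean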
Example #12
Source File: Conceptor.py    From EchoTorch with GNU General Public License v3.0
def similarity(C1, C2):
        """
        Similarity between two conceptors
        :param C1:
        :param C2:
        :return:
        """
        # Compute singular values
        Ua, Sa, _ = torch.svd(C1.get_C())
        Ub, Sb, _ = torch.svd(C2.get_C())

        # Measure
        return generalized_squared_cosine(Sa, Ua, Sb, Ub)
    # end similarity

    ###############################################
    # OPERATORS
    ###############################################

    # Similarity with another conceptor 
Example #13
Source File: Conceptor.py    From EchoTorch with GNU General Public License v3.0
def sim(self, cb, measure='gsc'):
        """
        Similarity with another conceptor
        :param cb:
        :return:
        """
        # Compute singular values
        Ua, Sa, _ = torch.svd(self.C)
        Ub, Sb, _ = torch.svd(cb.get_C())

        # Measure
        if measure == 'gsc':
            return generalized_squared_cosine(Sa, Ua, Sb, Ub)
        # end if
    # end sim

    # Positive evidence 
Example #14
Source File: Conceptor.py    From EchoTorch with GNU General Public License v3.0
def finalize(self):
        """
        Finalize training with LU factorization or Pseudo-inverse
        """
        # Average
        self.R = self.R / self.n_samples

        # SVD
        (U, S, V) = torch.svd(self.R)

        # Compute new singular values
        Snew = torch.mm(torch.diag(S), torch.inverse(torch.diag(S) + math.pow(self.aperture, -2) * torch.eye(self.input_dim, dtype=self.dtype)))

        # Apply new SVs to get the conceptor
        self.C.data = torch.mm(torch.mm(U, Snew), U.t()).data

        # Not in training mode anymore
        self.train(False)
    # end finalize

    # Set conceptor 
Example #15
Source File: ConceptorPool.py    From EchoTorch with GNU General Public License v3.0
def compute_A_SV(conceptors):
        """
        Get singular values of A
        :param conceptors:
        :return:
        """
        # A (OR of all conceptors)
        A = ConceptorPool.compute_A(conceptors)

        # Compute SVD
        _, S, _ = torch.svd(A.get_C())

        return S
    # end compute_A_SV

    # Compute A (OR of all conceptors)
Example #16
Source File: smpl_param_regressor.py    From GraphCMR with BSD 3-Clause "New" or "Revised" License
def batch_svd(A):
    """Wrapper around torch.svd that works when the input is a batch of matrices."""
    U_list = []
    S_list = []
    V_list = []
    for i in range(A.shape[0]):
        U, S, V = torch.svd(A[i])
        U_list.append(U)
        S_list.append(S)
        V_list.append(V)
    U = torch.stack(U_list, dim=0)
    S = torch.stack(S_list, dim=0)
    V = torch.stack(V_list, dim=0)
    return U, S, V 
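
The loop above was needed on older PyTorch versions; recent releases accept batched inputs directly, either through torch.svd itself or through the newer torch.linalg.svd (which returns Vh = V^T). A short comparison, assuming a reasonably recent PyTorch:

import torch

A = torch.randn(10, 4, 3)

U1, S1, V1 = batch_svd(A)                                # loop-based wrapper from above
U2, S2, Vh2 = torch.linalg.svd(A, full_matrices=False)   # batched, built in

print(U1.shape, S1.shape, V1.shape)   # torch.Size([10, 4, 3]) torch.Size([10, 3]) torch.Size([10, 3, 3])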
Example #17
Source File: util.py    From PytorchWCT with MIT License
def whiten_and_color(self,cF,sF):
        cFSize = cF.size()
        c_mean = torch.mean(cF,1) # c x (h x w)
        c_mean = c_mean.unsqueeze(1).expand_as(cF)
        cF = cF - c_mean

        contentConv = torch.mm(cF,cF.t()).div(cFSize[1]-1) + torch.eye(cFSize[0]).double()
        c_u,c_e,c_v = torch.svd(contentConv,some=False)

        k_c = cFSize[0]
        for i in range(cFSize[0]):
            if c_e[i] < 0.00001:
                k_c = i
                break

        sFSize = sF.size()
        s_mean = torch.mean(sF,1)
        sF = sF - s_mean.unsqueeze(1).expand_as(sF)
        styleConv = torch.mm(sF,sF.t()).div(sFSize[1]-1)
        s_u,s_e,s_v = torch.svd(styleConv,some=False)

        k_s = sFSize[0]
        for i in range(sFSize[0]):
            if s_e[i] < 0.00001:
                k_s = i
                break

        c_d = (c_e[0:k_c]).pow(-0.5)
        step1 = torch.mm(c_v[:,0:k_c],torch.diag(c_d))
        step2 = torch.mm(step1,(c_v[:,0:k_c].t()))
        whiten_cF = torch.mm(step2,cF)

        s_d = (s_e[0:k_s]).pow(0.5)
        targetFeature = torch.mm(torch.mm(torch.mm(s_v[:,0:k_s],torch.diag(s_d)),(s_v[:,0:k_s].t())),whiten_cF)
        targetFeature = targetFeature + s_mean.unsqueeze(1).expand_as(targetFeature)
        return targetFeature 
Example #18
Source File: test_manifold_basic.py    From geoopt with Apache License 2.0
def canonical_stiefel_case():
    torch.manual_seed(42)
    shape = manifold_shapes[geoopt.manifolds.CanonicalStiefel]
    ex = torch.randn(*shape)
    ev = torch.randn(*shape)
    u, _, v = torch.svd(ex)
    x = u @ v.t()
    v = ev - x @ ev.t() @ x
    manifold = geoopt.manifolds.CanonicalStiefel()
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case 
Example #19
Source File: core.py    From WCT2 with MIT License
def svd(feat, iden=False, device='cpu'):
    size = feat.size()
    mean = torch.mean(feat, 1)
    mean = mean.unsqueeze(1).expand_as(feat)
    _feat = feat.clone()
    _feat -= mean
    if size[1] > 1:
        conv = torch.mm(_feat, _feat.t()).div(size[1] - 1)
    else:
        conv = torch.mm(_feat, _feat.t())
    if iden:
        conv += torch.eye(size[0]).to(device)
    u, e, v = torch.svd(conv, some=False)
    return u, e, v 
Example #20
Source File: core.py    From WCT2 with MIT License
def wct_core(cont_feat, styl_feat, weight=1, registers=None, device='cpu'):
    cont_feat = get_squeeze_feat(cont_feat)
    cont_min = cont_feat.min()
    cont_max = cont_feat.max()
    cont_mean = torch.mean(cont_feat, 1).unsqueeze(1).expand_as(cont_feat)
    cont_feat -= cont_mean

    if not registers:
        _, c_e, c_v = svd(cont_feat, iden=True, device=device)

        styl_feat = get_squeeze_feat(styl_feat)
        s_mean = torch.mean(styl_feat, 1)
        _, s_e, s_v = svd(styl_feat, iden=True, device=device)
        k_s = get_rank(s_e, styl_feat.size()[0])
        s_d = (s_e[0:k_s]).pow(0.5)
        EDE = torch.mm(torch.mm(s_v[:, 0:k_s], torch.diag(s_d) * weight), (s_v[:, 0:k_s].t()))

        if registers is not None:
            registers['EDE'] = EDE
            registers['s_mean'] = s_mean
            registers['c_v'] = c_v
            registers['c_e'] = c_e
    else:
        EDE = registers['EDE']
        s_mean = registers['s_mean']
        _, c_e, c_v = svd(cont_feat, iden=True, device=device)

    k_c = get_rank(c_e, cont_feat.size()[0])
    c_d = (c_e[0:k_c]).pow(-0.5)
    # TODO: could be faster
    step1 = torch.mm(c_v[:, 0:k_c], torch.diag(c_d))
    step2 = torch.mm(step1, (c_v[:, 0:k_c].t()))
    whiten_cF = torch.mm(step2, cont_feat)

    targetFeature = torch.mm(EDE, whiten_cF)
    targetFeature = targetFeature + s_mean.unsqueeze(1).expand_as(targetFeature)
    targetFeature.clamp_(cont_min, cont_max)

    return targetFeature 
Example #21
Source File: basic_utils.py    From PVN3D with MIT License
def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
    Input:
        A: Nxm numpy array of corresponding points, usually points on mdl
        B: Nxm numpy array of corresponding points, usually points on camera axis
    Returns:
    T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
    R: mxm rotation matrix
    t: mx1 translation vector
    '''

    assert A.shape == B.shape
    # get number of dimensions
    m = A.shape[1]
    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B
    # rotation matrix
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    # special reflection case
    if np.linalg.det(R) < 0:
        Vt[m-1, :] *= -1
        R = np.dot(Vt.T, U.T)
    # translation
    t = centroid_B.T - np.dot(R, centroid_A.T)
    T = np.zeros((3, 4))
    T[:, :3] = R
    T[:, 3] = t
    return  T 
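
A hedged self-check for the routine above: build a known rotation and translation, apply them to random points (row-vector convention, B = A @ R^T + t), and confirm they are recovered:

import numpy as np

theta = np.deg2rad(30.0)
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([0.5, -1.0, 2.0])

A = np.random.randn(100, 3)
B = A @ R_true.T + t_true               # each row: b_i = R a_i + t

T = best_fit_transform(A, B)
print(np.allclose(T[:, :3], R_true), np.allclose(T[:, 3], t_true))   # True True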
Example #22
Source File: basic_utils.py    From PVN3D with MIT License
def best_fit_transform_torch(self, A, B):
        '''
        Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
        Input:
            A: Nxm numpy array of corresponding points, usually points on mdl
            B: Nxm numpy array of corresponding points, usually points on camera axis
        Returns:
        T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
        R: mxm rotation matrix
        t: mx1 translation vector
        '''
        assert A.size() == B.size()
        # get number of dimensions
        m = A.size()[1]
        # translate points to their centroids
        centroid_A = torch.mean(A, dim=0)
        centroid_B = torch.mean(B, dim=0)
        AA = A - centroid_A
        BB = B - centroid_B
        # rotation matrix
        H = torch.mm(AA.transpose(1, 0), BB)
        U, S, Vt = torch.svd(H)
        R = torch.mm(Vt.transpose(1, 0), U.transpose(1, 0))
        # special reflection case
        if torch.det(R) < 0:
            Vt[m-1, :] *= -1
            R = torch.mm(Vt.transpose(1, 0), U.transpose(1, 0))
        # translation
        t = centroid_B - torch.mm(R, centroid_A.view(3, 1))[:, 0]
        T = torch.zeros(3, 4).cuda()
        T[:, :3] = R
        T[:, 3] = t
        return  T 
Example #23
Source File: echo_state_network.py    From pytorch-esn with MIT License
def fit(self):
        if self.readout_training in {'gd', 'svd'}:
            return

        if self.readout_training == 'cholesky':
            W = torch.solve(self.XTy,
                           self.XTX + self.lambda_reg * torch.eye(
                               self.XTX.size(0), device=self.XTX.device))[0].t()
            self.XTX = None
            self.XTy = None

            self.readout.bias = nn.Parameter(W[:, 0])
            self.readout.weight = nn.Parameter(W[:, 1:])
        elif self.readout_training == 'inv':
            I = (self.lambda_reg * torch.eye(self.XTX.size(0))).to(
                self.XTX.device)
            A = self.XTX + I

            if torch.det(A) != 0:
                W = torch.mm(torch.inverse(A), self.XTy).t()
            else:
                pinv = torch.pinverse(A)
                W = torch.mm(pinv, self.XTy).t()

            self.readout.bias = nn.Parameter(W[:, 0])
            self.readout.weight = nn.Parameter(W[:, 1:])

            self.XTX = None
            self.XTy = None 
Example #24
Source File: data.py    From ssl_bad_gan with MIT License
def get_zca_cuda(self, reg=1e-6):
        images = self.images.cuda()
        if images.dim() > 2:
            images = images.view(images.size(0), -1)
        mean = images.mean(0)
        images -= mean.expand_as(images)
        sigma = torch.mm(images.transpose(0, 1), images) / images.size(0)
        U, S, V = torch.svd(sigma)
        components = torch.mm(torch.mm(U, torch.diag(1.0 / torch.sqrt(S) + reg)), U.transpose(0, 1))
        return components, mean 
Example #25
Source File: feature_transforms.py    From deep-transfer with Apache License 2.0
def wct_mask(cf, sf):
    cf = cf.double()
    cf_sizes = cf.size()
    c_mean = torch.mean(cf, 1)
    c_mean = c_mean.unsqueeze(1).expand_as(cf)
    cf -= c_mean

    c_covm = torch.mm(cf, cf.t()).div(cf_sizes[1] - 1)
    c_u, c_e, c_v = torch.svd(c_covm, some=False)

    k_c = cf_sizes[0]
    for i in range(cf_sizes[0]):
        if c_e[i] < 0.00001:
            k_c = i
            break
    c_d = (c_e[0:k_c]).pow(-0.5)
    whitened = torch.mm(torch.mm(torch.mm(c_v[:, 0:k_c], torch.diag(c_d)), (c_v[:, 0:k_c].t())), cf)


    sf = sf.double()
    sf_sizes = sf.size()
    sfv = sf.view(sf_sizes[0], sf_sizes[1] * sf_sizes[2])
    s_mean = torch.mean(sfv, 1)
    s_mean = s_mean.unsqueeze(1).expand_as(sfv)
    sfv -= s_mean

    s_covm = torch.mm(sfv, sfv.t()).div((sf_sizes[1] * sf_sizes[2]) - 1)
    s_u, s_e, s_v = torch.svd(s_covm, some=False)

    s_k = sf_sizes[0]
    for i in range(sf_sizes[0]):
        if s_e[i] < 0.00001:
            s_k = i
            break
    s_d = (s_e[0:s_k]).pow(0.5)
    ccsf = torch.mm(torch.mm(torch.mm(s_v[:, 0:s_k], torch.diag(s_d)), s_v[:, 0:s_k].t()), whitened)

    ccsf += s_mean.resize_as_(ccsf)
    return ccsf.float() 
Example #26
Source File: utils.py    From pyvot with MIT License
def rigid_transform_3D(A, B):
    assert len(A) == len(B)

    num_rows, num_cols = A.shape

    if num_rows != 3:
        raise Exception("matrix A is not 3xN, it is {}x{}".format(num_rows, num_cols))

    [num_rows, num_cols] = B.shape
    if num_rows != 3:
        raise Exception("matrix B is not 3xN, it is {}x{}".format(num_rows, num_cols))

    # find mean column wise
    centroid_A = np.mean(A, axis=1)
    centroid_B = np.mean(B, axis=1)

    # subtract mean
    Am = A - np.tile(centroid_A, (1, num_cols))
    Bm = B - np.tile(centroid_B, (1, num_cols))

    # dot is matrix multiplication for array
    H = Am * np.transpose(Bm)

    # find rotation
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T * U.T

    # special reflection case
    if np.linalg.det(R) < 0:
        print("det(R) < 0, reflection detected! Correcting for it ...\n")
        Vt[2,:] *= -1
        R = Vt.T * U.T

    t = -R*centroid_A + centroid_B

    return R, t 
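
Note that this variant uses the * operator as matrix multiplication, so it expects np.matrix inputs in a 3xN (column-point) layout; with plain ndarrays, * would be elementwise. A hedged usage sketch:

import numpy as np

theta = np.deg2rad(45.0)
R_true = np.mat([[np.cos(theta), -np.sin(theta), 0.0],
                 [np.sin(theta),  np.cos(theta), 0.0],
                 [0.0,            0.0,           1.0]])
t_true = np.mat([[0.1], [0.2], [0.3]])

A = np.mat(np.random.randn(3, 10))                       # 3xN points
B = R_true * A + np.tile(t_true, (1, A.shape[1]))

R, t = rigid_transform_3D(A, B)
print(np.allclose(R, R_true), np.allclose(t, t_true))    # True True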
Example #27
Source File: Conceptor.py    From EchoTorch with GNU General Public License v3.0
def singular_values(self):
        """
        Singular values
        :return:
        """
        # Compute SVD
        (Ua, Sa, Va) = torch.svd(self.get_C())
        return Ua, torch.diag(Sa), Va
    # end singular_values

    # Some of singular values 
Example #28
Source File: utility_functions.py    From EchoTorch with GNU General Public License v3.0
def compute_singular_values(stats):
    """
    Compute singular values
    :param stats:
    :return:
    """
    # Compute R (correlation matrix)
    R = stats.t().mm(stats) / stats.shape[0]

    # Compute singular values
    return torch.svd(R)
# end compute_singular_values


# Compute spectral radius of a square 2-D tensor 
Example #29
Source File: visualisation.py    From EchoTorch with GNU General Public License v3.0
def show_sv_for_increasing_aperture(conceptor, factor, title):
    """
    Show singular values for increasing aperture
    :param conceptors:
    :param factor:
    :param title:
    :return:
    """
    # Fig
    fig = plt.figure()
    ax = fig.gca()
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 1.5)
    ax.grid(True)

    # For each aperture multiplication
    for i in range(5):
        # Compute SVD
        _, S, _ = torch.svd(conceptor.get_C())

        # Plot
        ax.plot(S.numpy(), '--')

        # Multiply all conceptor's aperture by 10
        conceptor.multiply_aperture(factor)
    # end for

    # Show
    ax.set_xlabel(u"Singular values")
    ax.set_title(title)
    plt.show()
    plt.close()
# end show_sv_for_increasing_aperture


# Show conceptors similarity matrix 
Example #30
Source File: visualisation.py    From EchoTorch with GNU General Public License v3.0
def plot_singular_values(stats, title, xmin, xmax, ymin, ymax, log=False):
    """
    Plot singular values
    :param stats:
    :param title:
    :param timestep:
    :param start:
    :return:
    """
    # Compute R (correlation matrix)
    R = stats.t().mm(stats) / stats.shape[0]

    # Compute singular values
    U, S, V = torch.svd(R)
    singular_values = S

    # Compute singular values
    if log:
        singular_values = np.log10(singular_values)
    # end if

    # Fig
    fig = plt.figure()
    ax = fig.gca()
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.grid(True)

    # For each plot
    ax.plot(singular_values.numpy(), '--o')

    ax.set_xlabel("Timesteps")
    ax.set_title(title)
    plt.show()
    plt.close()

    return singular_values, U
# end plot_singular_values


# Display neurons activities on a 3D plot