Python numpy.matlib.repmat() Examples

The following code examples show how to use numpy.matlib.repmat(). They are taken from open source Python projects.

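Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what numpy.matlib.repmat() does; for 1-D and 2-D inputs, np.tile gives the same result.

import numpy as np
import numpy.matlib as matlib

a = np.arange(1, 5)                                 # 1-D input is treated as a 1 x 4 row vector
tiled = matlib.repmat(a, 2, 3)                      # 2 copies along rows, 3 along columns
print(tiled.shape)                                  # (2, 12)
print(np.array_equal(tiled, np.tile(a, (2, 3))))    # True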
Example 1
Project: ibllib   Author: int-brain-lab   File: test_dsp.py    MIT License
def test_fexpand(self):
        # test odd input
        res = np.random.rand(11)
        X = ft.freduce(np.fft.fft(res))
        R = np.real(np.fft.ifft(ft.fexpand(X, 11)))
        self.assertTrue(np.all((res - R) < 1e-6))
        # test even input
        res = np.random.rand(12)
        X = ft.freduce(np.fft.fft(res))
        R = np.real(np.fft.ifft(ft.fexpand(X, 12)))
        self.assertTrue(np.all((res - R) < 1e-6))
        # test with a 2 dimensional input along last dimension
        res = np.random.rand(2, 12)
        X = ft.freduce(np.fft.fft(res))
        R = np.real(np.fft.ifft(ft.fexpand(X, 12)))
        self.assertTrue(np.all((res - R) < 1e-6))
        # test with a 3 dimensional input along last dimension
        res = np.random.rand(3, 5, 12)
        X = ft.freduce(np.fft.fft(res))
        R = np.real(np.fft.ifft(ft.fexpand(X, 12)))
        self.assertTrue(np.all((res - R) < 1e-6))
        # test with 2 dimensional input along first dimension
        fs = np.transpose(mat.repmat(ft.fscale(500, 0.001, one_sided=True), 4, 1))
        self.assertTrue(ft.fexpand(fs, 500, axis=0).shape == (500, 4)) 
Example 2
Project: ibllib   Author: int-brain-lab   File: test_dsp.py    MIT License
def test_filter_lp_hp(self):
        # test 1D time series: subtracting the lp filter removes the DC component
        ts1 = np.random.rand(500)
        out1 = ft.lp(ts1, 1, [.1, .2])
        self.assertTrue(np.mean(ts1 - out1) < 0.001)
        # test 2D case along the last dimension
        ts = mat.repmat(ts1, 11, 1)
        out = ft.lp(ts, 1, [.1, .2])
        self.assertTrue(np.allclose(out, out1))
        # test 2D case along the first dimension
        ts = mat.repmat(ts1[:, np.newaxis], 1, 11)
        out = ft.lp(ts, 1, [.1, .2], axis=0)
        self.assertTrue(np.allclose(np.transpose(out), out1))
        # test 1D time series: lp and hp filters are complementary (hp = signal - lp)
        out2 = ft.hp(ts1, 1, [.1, .2])
        self.assertTrue(np.allclose(out1, ts1 - out2)) 
Example 3
Project: safe-exploration   Author: befelix   File: utils.py    MIT License
def sample_inside_polytope(x, a, b):
    """
    for a set of samples x = [x_1,..,x_k]^T
    check sample_wise
        Ax_i \leq b , i=1,..,k

    x: k x n np.ndarray[float]
        The samples (k samples of dimensionality n)
    a: m x n np.ndarray[float]
        the matrix of the linear inequality
    b: m x 1 np.ndarray[float]
        the vector of the linear inequality

    """
    k, _ = x.shape

    c = np.dot(a, x.T) - repmat(b, 1, k)

    return np.all(c < 0, axis=0).squeeze() 
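A hypothetical usage sketch for the helper above (the unit-box constraints and sample values are invented for illustration): repmat simply copies the offset vector b across the k samples before the element-wise comparison.

import numpy as np
from numpy.matlib import repmat

def sample_inside_polytope(x, a, b):
    # same computation as above: A x_i < b, checked column-wise per sample
    k, _ = x.shape
    c = np.dot(a, x.T) - repmat(b, 1, k)
    return np.all(c < 0, axis=0).squeeze()

# the unit box |x_i| < 1 written as A x < b
a = np.array([[1., 0.], [-1., 0.], [0., 1.], [0., -1.]])
b = np.ones((4, 1))
x = np.array([[0.5, 0.2],    # inside
              [1.5, 0.0]])   # outside
print(sample_inside_polytope(x, a, b))   # [ True False]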
Example 4
Project: VehicleTracking   Author: ChengeLi   File: sparse_subspace_clusteringClass.py    MIT License
def construct_adjacency_non_fix_len(self):
        """samples not dimension aligned"""
        self.adjacency = np.zeros([self.dataset.shape[0], self.dataset.shape[0]])
        adjacency = np.zeros([self.dataset.shape[0], self.dataset.shape[0]])

        for i in range(self.dataset.shape[0]):
            print(i)
            idx = np.where(self.dataset[i, :] != 0)[0]
            temp_Y = self.dataset[i, idx] / np_lg.norm(self.dataset[i, idx])
            temp_X = np.zeros([self.dataset.shape[0] + idx.size, idx.size])
            temp_X_norm = 1.0 / (np_lg.norm(self.dataset[:, idx], axis=1) + np.power(10, -10))
            temp_X_norm[np.where(np.isinf(temp_X_norm))[0]] = 0
            temp_X[0:self.dataset.shape[0], :] = self.dataset[:, idx] * np.transpose(
                    np_mat.repmat(temp_X_norm, idx.size, 1))
            temp_X[i, :] = np.zeros(idx.size)
            temp_X[self.dataset.shape[0]:self.dataset.shape[0] + idx.size, :] = np.diag(np.ones(idx.size) * 0.1)

            clf = sklearn.linear_model.Lasso(1 / np.power(idx.size, 0.5) / 1000)
            clf.fit(temp_X.T * 100, temp_Y * 100)
            adjacency[i, :] = clf.sparse_coef_.todense()[:, 0:self.dataset.shape[0]]
        self.adjacency = np.abs(adjacency + np.transpose(adjacency)) 
Example 5
Project: ANHIR_MW   Author: lNefarin   File: initial_alignment.py    Apache License 2.0
def ransac_rigid(source_points, target_points, confidence, threshold):
    try:
        max_iters = 25000
        transform, inliers = cv2.estimateAffinePartial2D(source_points, target_points, 0, ransacReprojThreshold = threshold, maxIters = max_iters, confidence = confidence)
        source_points = np.squeeze(source_points, None)
        target_points = np.squeeze(target_points, None)
        transform = cv2.estimateRigidTransform(
            np.resize(source_points[matlib.repmat(inliers.astype(bool), 1, 2)],
            (np.sum(inliers), 2)),
            np.resize(target_points[matlib.repmat(inliers.astype(bool), 1, 2)],
            (np.sum(inliers), 2)),
            0)
        if transform is not None:
            t_transform = transform
            transform = np.eye(3)
            transform[0:2, 0:3] = t_transform
            failed = False
        else:
            transform = np.eye(3)
            failed = True
    except:
        transform = np.eye(3)
        failed = True
    return transform, failed 
Example 6
Project: pykernels   Author: gmum   File: shortestpath.py    MIT License
def _create_accum_list_labeled(self, shortest_paths, maxpath,
                                   labels_t, numlabels):
        """
        Construct accumulation array matrix for one dataset
        containing labeled graph data.
        """
        res = lil_matrix(
            np.zeros((len(shortest_paths),
                      (maxpath + 1) * numlabels * (numlabels + 1) // 2)))
        for i, s in enumerate(shortest_paths):
            labels = labels_t[i]
            labels_aux = matlib.repmat(labels, 1, len(labels))
            min_lab = np.minimum(labels_aux.T, labels_aux)
            max_lab = np.maximum(labels_aux.T, labels_aux)
            subsetter = np.triu(~(np.isinf(s)))
            min_lab = min_lab[subsetter]
            max_lab = max_lab[subsetter]
            ind = s[subsetter] * numlabels * (numlabels + 1) / 2 + \
                    (min_lab - 1) * (2*numlabels + 2 - min_lab) / 2 + \
                    max_lab - min_lab
            accum = np.zeros((maxpath + 1) * numlabels * (numlabels + 1) // 2)
            accum[:int(ind.max()) + 1] += np.bincount(ind.astype(int))
            res[i] = lil_matrix(accum)
        return res 
Example 7
Project: dynamo-release   Author: aristoteleo   File: psl.py    BSD 3-Clause "New" or "Revised" License
def sqdist(a, b):
    """Calculate the pairwise squared distances between the columns of a and b.

    Arguments
    ---------
        a: 'np.ndarray'
            A matrix with :math:`D \times N` dimension
        b: 'np.ndarray'
            A matrix with :math:`D \times N` dimension

    Returns
    -------
    dist: 'np.ndarray'
        An :math:`N \times N` matrix whose (i, j) entry is the squared distance between column i of a and column j of b
    """
    aa = np.sum(a**2, axis=0)
    bb = np.sum(b**2, axis=0)
    ab = a.T.dot(b)

    aa_repmat = matlib.repmat(aa[:, None], 1, b.shape[1])
    bb_repmat = matlib.repmat(bb[None, :], a.shape[1], 1)

    dist = abs(aa_repmat + bb_repmat - 2 * ab)

    return dist 
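A quick standalone sanity check (mine, not part of the project) of the identity the function relies on: after repmat expands the squared column norms, dist[i, j] equals ||a_i - b_j||^2 for columns a_i of a and b_j of b.

import numpy as np
import numpy.matlib as matlib

rng = np.random.default_rng(0)
a = rng.standard_normal((3, 5))      # D x N_a
b = rng.standard_normal((3, 4))      # D x N_b
aa = np.sum(a ** 2, axis=0)
bb = np.sum(b ** 2, axis=0)
ab = a.T.dot(b)
dist = abs(matlib.repmat(aa[:, None], 1, b.shape[1])
           + matlib.repmat(bb[None, :], a.shape[1], 1) - 2 * ab)
brute = np.array([[np.sum((a[:, i] - b[:, j]) ** 2) for j in range(b.shape[1])]
                  for i in range(a.shape[1])])
print(np.allclose(dist, brute))      # True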
Example 8
Project: dynamo-release   Author: aristoteleo   File: psl.py    BSD 3-Clause "New" or "Revised" License
def repmat(X, m, n):
    """Return an array containing m copies of X along the row dimension and n copies along the column
    dimension, so the result has shape (m * rows(X), n * cols(X)). For example, tiling a 1-by-4 row vector
    with m=2, n=3 returns a 2-by-12 matrix.
    Arguments
    ---------
        X: 'np.ndarray'
            An array-like matrix.
        m: 'int'
            Number of copies along the row dimension
        n: 'int'
            Number of copies along the column dimension
    Returns
    -------
    xy_rep: 'np.ndarray'
        The tiled matrix
    """
    xy_rep = matlib.repmat(X, m, n)

    return xy_rep 
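A short sketch (my own toy input) confirming the size rule described in the docstring: each dimension of X is scaled by the corresponding repetition count.

import numpy as np
import numpy.matlib as matlib

A = np.array([[1, 2],
              [3, 4]])          # 2 x 2
B = matlib.repmat(A, 2, 3)
print(B.shape)                  # (4, 6)
print(B)
# [[1 2 1 2 1 2]
#  [3 4 3 4 3 4]
#  [1 2 1 2 1 2]
#  [3 4 3 4 3 4]]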
Example 9
Project: dynamo-release   Author: aristoteleo   File: DDRTree.py    BSD 3-Clause "New" or "Revised" License
def sqdist(a, b):
    """Calculate the pairwise squared distances between the columns of a and b.

    Arguments
    ---------
        a: 'np.ndarray'
            A matrix with :math:`D \times N` dimension
        b: 'np.ndarray'
            A matrix with :math:`D \times N` dimension

    Returns
    -------
    dist: 'np.ndarray'
        An :math:`N \times N` matrix whose (i, j) entry is the squared distance between column i of a and column j of b
    """
    aa = np.sum(a ** 2, axis=0)
    bb = np.sum(b ** 2, axis=0)
    ab = a.T.dot(b)

    aa_repmat = matlib.repmat(aa[:, None], 1, b.shape[1])
    bb_repmat = matlib.repmat(bb[None, :], a.shape[1], 1)

    dist = abs(aa_repmat + bb_repmat - 2 * ab)

    return dist 
Example 10
Project: dynamo-release   Author: aristoteleo   File: DDRTree.py    BSD 3-Clause "New" or "Revised" License
def repmat(X, m, n):
    """Return an array containing m copies of X along the row dimension and n copies along the column
    dimension, so the result has shape (m * rows(X), n * cols(X)). For example, tiling a 1-by-4 row vector
    with m=2, n=3 returns a 2-by-12 matrix.

    Arguments
    ---------
        X: 'np.ndarray'
            An array-like matrix.
        m: 'int'
            Number of copies along the row dimension
        n: 'int'
            Number of copies along the column dimension

    Returns
    -------
    xy_rep: 'np.ndarray'
        The tiled matrix
    """
    xy_rep = matlib.repmat(X, m, n)

    return xy_rep 
Example 11
Project: ibllib   Author: int-brain-lab   File: test_dsp.py    MIT License
def test_freduce(self):
        # test with 1D arrays
        fs = np.fft.fftfreq(5)
        self.assertTrue(np.all(ft.freduce(fs) == fs[:-2]))
        fs = np.fft.fftfreq(6)
        self.assertTrue(np.all(ft.freduce(fs) == fs[:-2]))

        # test 2D arrays along both dimensions
        fs = mat.repmat(ft.fscale(500, 0.001), 4, 1)
        self.assertTrue(ft.freduce(fs).shape == (4, 251))
        self.assertTrue(ft.freduce(np.transpose(fs), axis=0).shape == (251, 4)) 
Example 12
Project: TicTacToe   Author: Neural-Network   File: policyiteration.py    BSD 3-Clause "New" or "Revised" License
def collapsedTransitions(Ts, policy):
    """ Collapses a list of transition matrices (one per action) and a list 
        of action probability vectors into a single transition matrix."""
    res = zeros_like(Ts[0])
    dim = len(Ts[0])
    for ai, ap in enumerate(policy.T):
        res += Ts[ai] * repmat(ap, dim, 1).T
    return res 
Example 13
Project: TicTacToe   Author: Neural-Network   File: leastsquares.py    BSD 3-Clause "New" or "Revised" License
def trueFeatureStats(T, R, fMap, discountFactor, stateProp=1, MAT_LIMIT=1e8):
    """ Gather the statistics needed for LSTD,
    assuming infinite data (true probabilities).
    Option: if stateProp is  < 1, then only a proportion of all 
    states will be seen as starting state for transitions """
    dim = len(fMap)
    numStates = len(T)
    statMatrix = zeros((dim, dim))
    statResidual = zeros(dim)
    ss = list(range(numStates))
    repVersion = False
    
    if stateProp < 1:
        ss = random.sample(ss, int(numStates * stateProp))
    elif dim * numStates**2 < MAT_LIMIT:
        repVersion = True
    
    # two variants, depending on how large we can afford our matrices to become.        
    if repVersion:    
        tmp1 = tile(fMap, (numStates,1,1))
        tmp2 = transpose(tmp1, (2,1,0))
        tmp3 = tmp2 - discountFactor * tmp1            
        tmp4 = tile(T, (dim,1,1))
        tmp4 *= transpose(tmp1, (1,2,0))
        statMatrix = tensordot(tmp3, tmp4, axes=[[0,2], [1,2]]).T
        statResidual = dot(R, dot(fMap, T).T)
    else:
        for sto in ss:
            tmp = fMap - discountFactor * repmat(fMap[:, sto], numStates, 1).T
            tmp2 = fMap * repmat(T[:, sto], dim, 1)
            statMatrix += dot(tmp2, tmp.T)             
            statResidual += R[sto] * dot(fMap, T[:, sto])
    return statMatrix, statResidual 
Example 14
Project: adVNTR   Author: mehrdadbakhtiari   File: pattern_clustering.py    BSD 3-Clause "New" or "Revised" License
def get_elbow_point_index(wcss):
    curve = wcss
    number_of_points = len(curve)
    all_coordinates = np.vstack((range(number_of_points), curve)).T
    np.array([range(number_of_points), curve])
    first_point = all_coordinates[0]
    line_vector = all_coordinates[-1] - all_coordinates[0]
    line_vector_norm = line_vector / np.sqrt(np.sum(line_vector**2))
    vec_from_first = all_coordinates - first_point
    scalar_product = np.sum(vec_from_first * matlib.repmat(line_vector_norm, number_of_points, 1), axis=1)
    vec_from_first_parallel = np.outer(scalar_product, line_vector_norm)
    vectors_to_line = vec_from_first - vec_from_first_parallel
    dists_to_line = np.sqrt(np.sum(vectors_to_line ** 2, axis=1))
    index_of_best_point = np.argmax(dists_to_line)
    return index_of_best_point 
Example 15
Project: Stein-Variational-Gradient-Descent   Author: dilinwang820   File: bayesian_logistic_regression.py    MIT License
def dlnprob(self, theta):
        
        if self.batchsize > 0:
            batch = [ i % self.N for i in range(self.iter * self.batchsize, (self.iter + 1) * self.batchsize) ]
            ridx = self.permutation[batch]
            self.iter += 1
        else:
            ridx = np.random.permutation(self.X.shape[0])
            
        Xs = self.X[ridx, :]
        Ys = self.Y[ridx]
        
        w = theta[:, :-1]  # logistic weights
        alpha = np.exp(theta[:, -1])  # the last column is logalpha
        d = w.shape[1]
        
        wt = np.multiply((alpha / 2), np.sum(w ** 2, axis=1))
        
        coff = np.matmul(Xs, w.T)
        y_hat = 1.0 / (1.0 + np.exp(-1 * coff))
        
        dw_data = np.matmul(((nm.repmat(np.vstack(Ys), 1, theta.shape[0]) + 1) / 2.0 - y_hat).T, Xs)  # Y \in {-1,1}
        dw_prior = -np.multiply(nm.repmat(np.vstack(alpha), 1, d) , w)
        dw = dw_data * 1.0 * self.X.shape[0] / Xs.shape[0] + dw_prior  # re-scale
        
        dalpha = d / 2.0 - wt + (self.a0 - 1) - self.b0 * alpha + 1  # the last term is the jacobian term
        
        return np.hstack([dw, np.vstack(dalpha)])  # % first order derivative 
Example 16
Project: Stein-Variational-Gradient-Descent   Author: dilinwang820   File: bayesian_logistic_regression.py    MIT License
def evaluation(self, theta, X_test, y_test):
        theta = theta[:, :-1]
        M, n_test = theta.shape[0], len(y_test)

        prob = np.zeros([n_test, M])
        for t in range(M):
            coff = np.multiply(y_test, np.sum(-1 * np.multiply(nm.repmat(theta[t, :], n_test, 1), X_test), axis=1))
            prob[:, t] = np.divide(np.ones(n_test), (1 + np.exp(coff)))
        
        prob = np.mean(prob, axis=1)
        acc = np.mean(prob > 0.5)
        llh = np.mean(np.log(prob))
        return [acc, llh] 
Example 17
Project: Stein-Variational-Gradient-Descent   Author: dilinwang820   File: multivariate_normal.py    MIT License
def dlnprob(self, theta):
        return -1*np.matmul(theta-nm.repmat(self.mu, theta.shape[0], 1), self.A) 
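A brief standalone note (values invented here, and assuming self.A plays the role of a symmetric precision matrix, as is typical for this kind of model): repmat just subtracts the mean from every particle row, so plain broadcasting gives the same Gaussian log-density gradient.

import numpy as np
import numpy.matlib as nm

mu = np.array([1.0, -1.0])
A = np.array([[2.0, 0.5],
              [0.5, 1.0]])           # assumed precision matrix
theta = np.array([[0.0, 0.0],
                  [2.0, 1.0]])       # two particles, one per row
grad_repmat = -1 * np.matmul(theta - nm.repmat(mu, theta.shape[0], 1), A)
grad_broadcast = -(theta - mu) @ A   # broadcasting subtracts mu from each row
print(np.allclose(grad_repmat, grad_broadcast))   # True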
Example 18
Project: pylocus   Author: LCAV   File: opt_space.py    BSD 2-Clause "Simplified" License
def Gp(X, m0, r):
    z = (X**2).sum(1) / (2 * m0 * r)
    z = 2 * np.exp((z - 1)**2) * (z - 1)
    z[z < 0] = 0

    return X * repmat(z, r, 1).T / (m0 * r) 
Example 19
Project: pyCFTrackers   Author: wwdguu   File: ldes.py    MIT License
def PSR(response,rate):
    max_response=np.max(response)
    h,w=response.shape
    k=4/(h*w)
    yy,xx=np.unravel_index(np.argmax(response, axis=None),response.shape)
    idx=np.arange(w)-xx
    idy=np.arange(h)-yy
    idx=repmat(idx,h,1)
    idy=repmat(idy,w,1).T
    t=idx**2+idy**2
    delta=1-np.exp(-k*t.astype(np.float32))
    r=(max_response-response)/delta
    r[np.isnan(r)]=np.inf
    return np.min(r) 
Example 20
Project: generate-models   Author: tum-ens   File: util.py    GNU General Public License v3.0
def resizem(A_in, row_new, col_new):
    """
    This function resizes a regular data grid by copying and pasting parts of the original array.

    :param A_in: Input matrix.
    :type A_in: numpy array
    :param row_new: New number of rows.
    :type row_new: integer
    :param col_new: New number of columns.
    :type col_new: integer

    :return A_out: Resized matrix.
    :rtype: numpy array
    """
    row_rep = row_new // np.shape(A_in)[0]
    col_rep = col_new // np.shape(A_in)[1]
    A_inf = A_in.flatten(order="F")[np.newaxis]
    A_out = reshape(
        repmat(
            reshape(reshape(repmat((A_in.flatten(order="F")[np.newaxis]), row_rep, 1), (row_new, -1), order="F").T, (-1, 1), order="F"), 1, col_rep
        ).T,
        (col_new, row_new),
        order="F",
    ).T

    return A_out 
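To make the nested reshape/repmat pipeline above concrete, here is a small self-contained run with a toy 2x2 input (my own example, reusing the function as defined above): each element becomes a 2x2 block in the 4x4 output.

import numpy as np
from numpy import reshape
from numpy.matlib import repmat

def resizem(A_in, row_new, col_new):
    # block-replication resize, same pipeline as above
    row_rep = row_new // np.shape(A_in)[0]
    col_rep = col_new // np.shape(A_in)[1]
    A_out = reshape(
        repmat(
            reshape(reshape(repmat(A_in.flatten(order="F")[np.newaxis], row_rep, 1),
                            (row_new, -1), order="F").T, (-1, 1), order="F"),
            1, col_rep,
        ).T,
        (col_new, row_new),
        order="F",
    ).T
    return A_out

A = np.array([[1, 2],
              [3, 4]])
print(resizem(A, 4, 4))
# [[1 1 2 2]
#  [1 1 2 2]
#  [3 3 4 4]
#  [3 3 4 4]]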
Example 21
Project: LayoutNetv2   Author: zouchuhang   File: pano_gen.py    MIT License
def lineFromTwoPoint(pt1, pt2):
    '''
    Generate line segment based on two points on panorama
    pt1, pt2: two points on panorama
    line:
        1~3-th dim: normal of the line
        4-th dim: the projection dimension ID
        5~6-th dim: the u of line segment endpoints in projection plane
    '''
    numLine = pt1.shape[0]
    lines = np.zeros((numLine, 6))
    n = np.cross(pt1, pt2)
    n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=1)), 1, 3) + 1e-9)
    lines[:, 0:3] = n

    areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))
    areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))
    areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))
    planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1
    lines[:, 3] = planeIDs

    for i in range(numLine):
        uv = xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])
        umax = uv[:, 0].max() + np.pi
        umin = uv[:, 0].min() + np.pi
        if umax - umin > np.pi:
            lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
        else:
            lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi

    return lines 
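A small aside (not from the project): the repmat calls above implement row-wise normalization and row-wise dot products with a constant vector, and plain NumPy broadcasting produces identical results.

import numpy as np
import numpy.matlib as matlib

rng = np.random.default_rng(1)
n = rng.standard_normal((5, 3))
norms = np.sqrt(np.sum(n ** 2, 1, keepdims=True))            # shape (5, 1)
via_repmat = n / (matlib.repmat(norms, 1, 3) + 1e-9)
via_broadcast = n / (norms + 1e-9)
print(np.allclose(via_repmat, via_broadcast))                # True

z = np.array([0, 0, 1])
area_repmat = np.abs(np.sum(n * matlib.repmat(z, 5, 1), 1, keepdims=True))
area_broadcast = np.abs(np.sum(n * z, 1, keepdims=True))
print(np.allclose(area_repmat, area_broadcast))              # True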
Example 22
Project: LayoutNetv2   Author: zouchuhang   File: pano.py    MIT License
def lineFromTwoPoint(pt1, pt2):
    '''
    Generate line segment based on two points on panorama
    pt1, pt2: two points on panorama
    line:
        1~3-th dim: normal of the line
        4-th dim: the projection dimension ID
        5~6-th dim: the u of line segment endpoints in projection plane
    '''
    numLine = pt1.shape[0]
    lines = np.zeros((numLine, 6))
    n = np.cross(pt1, pt2)
    n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=1)), 1, 3) + 1e-9)
    lines[:, 0:3] = n

    areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))
    areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))
    areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))
    planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1
    lines[:, 3] = planeIDs

    for i in range(numLine):
        uv = xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])
        umax = uv[:, 0].max() + np.pi
        umin = uv[:, 0].min() + np.pi
        if umax - umin > np.pi:
            lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
        else:
            lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi

    return lines 
Example 23
Project: safe-exploration   Author: befelix   File: environments.py    MIT License
def _sample_start_state(self, mean=None, std=None, n_samples=1):
        """ """
        init_std = self.init_std
        if std is not None:
            init_std = std

        init_m = mean
        if init_m is None:
            init_m = self.init_m

        samples = (repmat(init_std, n_samples, 1) * np.random.randn(n_samples, self.n_s)
                   + repmat(init_m, n_samples, 1))
        return samples.T.squeeze() 
Example 24
Project: Stock-Trading-using-RRL   Author: rajatgarg149   File: costFunction.py    MIT License
def costFunction( Xn, X, theta):
    miu = 1
    delta = 0.001        
    M = len(theta) - 2
    T = len(X) - M
    a, b = symbols('a b', real=True)
    dSdA = diff(a/(b-a*a)**.5,a)
    dSdB = diff(a/(b-a*a)**.5,b)
    Ft = updateFt(Xn, theta, T)
    #print Ft
    Ret, sharpe = rewardFunction(X, miu, delta, Ft, M)
    J = sharpe * -1
    dFt = np.zeros((T+1,M+2))
    for i in range(1,T+1):
        xt = [1]
        xt.extend(Xn[i-1:i+M-1])
        xt.extend([Ft[i-1]])
        tanh_out = np.tanh(sum(xt*theta))
        dFt[i] = (1 - tanh_out*tanh_out) * (xt + theta[M+1]*dFt[i-1])

    dRtFt = -1 * miu * delta * np.sign(Ft[1:]-Ft[:T])
    dRtFt = np.reshape(dRtFt,(T,1))
    dRtFtt = miu * X[M:M+T] + miu * delta * np.sign(Ft[1:]-Ft[:T])
    dRtFtt = np.reshape(dRtFtt,(T,1))
    #print dFt[1:].T.shape
    
    A = float(sum(Ret)) / T
    B = float(sum(Ret**2)) / T
    #print dSdA ,dSdB
    #prefix = (repmat((dSdA.subs(a,A)).subs(b,B), T, 1)/T) + np.reshape(((dSdB.subs(a,A)).subs(b,B)*2*Ret/T),(T,1))
    #print prefix.T
    #prefix = repmat(subs(subs(dSdA,a,A),b,B), T, 1)/T + subs(subs(dSdB,a,A),b,B)*2*Ret/T    
    #grad = sum(repmat(prefix', M+2, 1) .* (repmat(dRtFt', M+2, 1) .* dFt(:,2:end) + repmat(dRtFtt', M+2, 1) .* dFt(:,1:T)), 2)
    #grad = np.sum(repmat(prefix.T, M+2, 1) * (repmat(dRtFt.T, M+2, 1) * dFt[1:].T + repmat(dRtFtt.T, M+2, 1) * dFt[:T].T), 1)
    #print grad
    grad = 0
    grad = grad * -1
    #print len(dFt) , T
    #print J , grad
    return J , grad 
Example 25
Project: pytorch-layoutnet   Author: sunset1995   File: pano.py    MIT License
def lineFromTwoPoint(pt1, pt2):
    '''
    Generate line segment based on two points on panorama
    pt1, pt2: two points on panorama
    line:
        1~3-th dim: normal of the line
        4-th dim: the projection dimension ID
        5~6-th dim: the u of line segment endpoints in projection plane
    '''
    numLine = pt1.shape[0]
    lines = np.zeros((numLine, 6))
    n = np.cross(pt1, pt2)
    n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=1)), 1, 3) + 1e-9)
    lines[:, 0:3] = n

    areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))
    areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))
    areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))
    planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1
    lines[:, 3] = planeIDs

    for i in range(numLine):
        uv = xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])
        umax = uv[:, 0].max() + np.pi
        umin = uv[:, 0].min() + np.pi
        if umax - umin > np.pi:
            lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
        else:
            lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi

    return lines 
Example 26
Project: Lyssandra   Author: ektormak   File: sparse_coding.py    BSD 3-Clause "New" or "Revised" License
def llc(X, D, knn=5):
    # the sparse coder introduced in
    # "Locality-constrained Linear Coding for Image Classification"

    n_samples = X.shape[1]
    n_atoms = D.shape[1]
    # has the distance of
    # each sample to each atom
    dist = np.zeros((n_samples, n_atoms))
    # calculate the distances
    for i in range(n_samples):
        for j in range(n_atoms):
            dist[i, j] = norm(X[:, i] - D[:, j])

    # has the indices of the atoms
    # that are nearest neighbour to each sample
    knn_idx = np.zeros((n_samples, knn)).astype(int)
    for i in range(n_samples):
        knn_idx[i, :] = np.argsort(dist[i, :])[:knn]
    # the sparse coding matrix
    Z = np.zeros((n_atoms, n_samples))
    II = np.eye(knn)
    beta = 1e-4
    b = np.ones(knn)
    for i in range(n_samples):
        idx = knn_idx[i, :]
        z = D.T[idx, :] - repmat(X.T[i, :], knn, 1)
        C = np.dot(z, z.T)
        C = C + II * beta * np.trace(C)
        # solve the linear system C*c=b
        c = solve(C, b)
        # enforce the constraint on the sparse codes
        # such that sum(c)=1
        c = c / float(np.sum(c))
        Z[idx, i] = c

    return Z 
Example 27
Project: pybrain2   Author: pybrain2   File: policyiteration.py    BSD 3-Clause "New" or "Revised" License
def collapsedTransitions(Ts, policy):
    """ Collapses a list of transition matrices (one per action) and a list 
        of action probability vectors into a single transition matrix."""
    res = zeros_like(Ts[0])
    dim = len(Ts[0])
    for ai, ap in enumerate(policy.T):
        res += Ts[ai] * repmat(ap, dim, 1).T
    return res 
Example 28
Project: pybrain2   Author: pybrain2   File: leastsquares.py    BSD 3-Clause "New" or "Revised" License
def trueFeatureStats(T, R, fMap, discountFactor, stateProp=1, MAT_LIMIT=1e8):
    """ Gather the statistics needed for LSTD,
    assuming infinite data (true probabilities).
    Option: if stateProp is  < 1, then only a proportion of all 
    states will be seen as starting state for transitions """
    dim = len(fMap)
    numStates = len(T)
    statMatrix = zeros((dim, dim))
    statResidual = zeros(dim)
    ss = list(range(numStates))
    repVersion = False
    
    if stateProp < 1:
        ss = random.sample(ss, int(numStates * stateProp))
    elif dim * numStates**2 < MAT_LIMIT:
        repVersion = True
    
    # two variants, depending on how large we can afford our matrices to become.        
    if repVersion:    
        tmp1 = tile(fMap, (numStates,1,1))
        tmp2 = transpose(tmp1, (2,1,0))
        tmp3 = tmp2 - discountFactor * tmp1            
        tmp4 = tile(T, (dim,1,1))
        tmp4 *= transpose(tmp1, (1,2,0))
        statMatrix = tensordot(tmp3, tmp4, axes=[[0,2], [1,2]]).T
        statResidual = dot(R, dot(fMap, T).T)
    else:
        for sto in ss:
            tmp = fMap - discountFactor * repmat(fMap[:, sto], numStates, 1).T
            tmp2 = fMap * repmat(T[:, sto], dim, 1)
            statMatrix += dot(tmp2, tmp.T)             
            statResidual += R[sto] * dot(fMap, T[:, sto])
    return statMatrix, statResidual 
Example 29
Project: BIS   Author: intelligent-control-lab   File: KinematicModel.py    MIT License
def reset(self, dT, goals):
        """This function reset the robot state to initial, and set the goals to given goals. This function is useful when the user need to make sure all the robot are tested under the same goal sequence,
        
        Args:
            dT (float): the seperation between two control output
            goals (ndarray): n*6 array of goal specification. [x y z 0 0 0]
        """

        self.dT = dT
        self.set_goals(goals)

        self.init_x(self.init_state)
        self.x_his = repmat(self.x, 1, 50)
        self.n = np.shape(self.x)[0]
        self.H = matrix(eye(self.n))
        self.kalman_P = matrix(eye(self.n)) * (self.measure_noise**2)
        self.x_est = self.observe(self.x)
        self.m = matrix(zeros((6,1)))
        self.m_his = repmat(self.m, 1, 50)
        self.x_pred = zeros((self.n,1))
        self.trace = repmat(self.get_P(), 1, 100)

        self.goal_achieved = 0

        self.time = 0
        self.last_collision_time = 0
        self.score = dict()
        self.score['collision_cnt'] = 0
        self.score['safety'] = 0
        self.score['nearest_dis'] = 1e9
        self.score['efficiency'] = 0
        self.predictability = 0

        self.get_closest_X(np.vstack([10,10,10,0,0,0])) 
Example 30
Project: DeepESN   Author: lucapedrelli   File: DeepESN.py    BSD 3-Clause "New" or "Revised" License
def computeLayerState(self, input, layer, initialStatesLayer = None, DeepIP = 0):  
        # compute the state of a layer with pre-training if DeepIP == 1                    
        
        state = np.zeros((self.Nr, input.shape[1]))   
        
        if initialStatesLayer is None:
            initialStatesLayer = np.zeros(state[:,0:1].shape)
        
        input = self.Win[layer][:,0:-1].dot(input) + np.expand_dims(self.Win[layer][:,-1],1)   
        
        if DeepIP:
            state_net = np.zeros((self.Nr, input.shape[1]))
            state_net[:,0:1] = input[:,0:1]
            state[:,0:1] = self.lis[layer] * np.tanh(np.multiply(self.Gain[layer], state_net[:,0:1]) + self.Bias[layer])
        else:
            #state[:,0:1] = self.lis[layer] * np.tanh(np.multiply(self.Gain[layer], input[:,0:1]) + self.Bias[layer])        
            state[:,0:1] = (1-self.lis[layer]) * initialStatesLayer + self.lis[layer] * np.tanh( np.multiply(self.Gain[layer], self.W[layer].dot(initialStatesLayer) + input[:,0:1]) + self.Bias[layer])        
 
        for t in range(1,state.shape[1]):
            if DeepIP:
                state_net[:,t:t+1] = self.W[layer].dot(state[:,t-1:t]) + input[:,t:t+1]
                state[:,t:t+1] = (1-self.lis[layer]) * state[:,t-1:t] + self.lis[layer] * np.tanh(np.multiply(self.Gain[layer], state_net[:,t:t+1]) + self.Bias[layer])
                
                eta = self.IPconf.eta
                mu = self.IPconf.mu
                sigma2 = self.IPconf.sigma**2
            
                # IP learning rule
                deltaBias = -eta*((-mu/sigma2)+ np.multiply(state[:,t:t+1], (2*sigma2+1-(state[:,t:t+1]**2)+mu*state[:,t:t+1])/sigma2))
                deltaGain = eta / npm.repmat(self.Gain[layer],1,state_net[:,t:t+1].shape[1]) + deltaBias * state_net[:,t:t+1]
                
                # update gain and bias of activation function
                self.Gain[layer] = self.Gain[layer] + deltaGain
                self.Bias[layer] = self.Bias[layer] + deltaBias
                
            else:
                state[:,t:t+1] = (1-self.lis[layer]) * state[:,t-1:t] + self.lis[layer] * np.tanh( np.multiply(self.Gain[layer], self.W[layer].dot(state[:,t-1:t]) + input[:,t:t+1]) + self.Bias[layer])
                
        return state 
Example 31
Project: hypercl   Author: chrhenning   File: dataset.py    Apache License 2.0
def _to_one_hot(self, labels, reverse=False):
        """ Transform a list of labels into a 1-hot encoding.

        Args:
            labels: A list of class labels.
            reverse: If true, then one-hot encoded samples are transformed back
                to categorical labels.

        Returns:
            The 1-hot encoded labels.
        """
        if not self.classification:
            raise RuntimeError('This method can only be called for ' +
                                   'classification datasets.')

        # Initialize encoder.
        if self._one_hot_encoder is None:
            self._one_hot_encoder = OneHotEncoder( \
                categories=[range(self.num_classes)])
            num_time_steps = 1
            if self.sequence:
                num_time_steps = labels.shape[1] // np.prod(self.out_shape)
            self._one_hot_encoder.fit(npm.repmat(
                    np.arange(self.num_classes), num_time_steps, 1).T)

        if reverse:
            # Unfortunately, there is no inverse function in the OneHotEncoder
            # class. Therefore, we take the one-hot-encoded "labels" samples
            # and take the indices of all 1 entries. Note, that these indices
            # are returned as tuples, where the second column contains the
            # original column indices. These column indices from "labels"
            # modulo the number of classes results in the original labels.
            return np.reshape(np.argwhere(labels)[:,1] % self.num_classes, 
                              (labels.shape[0], -1))
        else:
            return self._one_hot_encoder.transform(labels).toarray() 
Example 32
Project: end-to-end-multiview-lipreading   Author: lzuwei   File: preprocessing.py    Apache License 2.0
def deltas(x, w=9):
    """
    Calculate the deltas (derivatives) of a sequence
    Use a W-point window (W odd, default 9) to calculate deltas using a
    simple linear slope.  This mirrors the delta calculation performed
    in feacalc etc.  Each row of X is filtered separately.

    :param x: data matrix where each feature corresponds to a row
        (so you may have to transpose the data you pass as an argument,
        and then transpose the output of the function)
    :param w: window size, defaults to 9
    :return: derivatives of a sequence
    """
    # compute the shape of the input
    num_row, num_cols = x.shape

    # define window shape
    hlen = w // 2  # floor integer divide
    w = 2 * hlen + 1  # odd number
    win = np.arange(hlen, -hlen - 1, -1, dtype=np.float32)

    # pad the data by repeating the first and last columns
    a = matlab.repmat(x[:, 0], 1, hlen).reshape((num_row, hlen), order='F')
    b = matlab.repmat(x[:, -1], 1, hlen).reshape((num_row, hlen), order='F')
    xx = np.concatenate((a, x, b), axis=1)

    # apply the delta filter, see matlab 1D filter
    d = signal.lfilter(win, 1, xx, 1)

    # trim the edges
    return d[:, hlen*2: hlen*2 + num_cols] 
Example 33
Project: pyh2nmf   Author: beedotkiran   File: pyh2nmf.py    GNU General Public License v3.0
def fastsvds(M,r): 
    """
    "Fast" but less accurate SVD by computing the SVD of MM^T or M^TM 
    ***IF*** one of the dimensions of M is much smaller than the other. 
    Note. This is numerically less stable, but useful for large hyperspectral 
    images. 

    """

    m,n = M.shape 
    rationmn = 10 # Parameter, should be >= 1

    if m < rationmn*n: 
        MMt = np.dot(M,M.T)
        u,s,v = svds(MMt,r)
        s = np.diag(s)
        v = np.dot(M.T, u) 
        v = np.multiply(v,repmat( (sum(v**2)+1e-16)**(-0.5),n,1)) 
        s = np.sqrt(s) 
    elif n < rationmn*m:
        MtM = np.dot(M.T,M)
        u,s,v = svds(MtM,r) 
        s = np.diag(s)
        u = np.dot(M,v) 
        u = np.multiply(u,repmat( (sum(u**2)+1e-16)**(-0.5),m,1))
        s = np.sqrt(s) 
    else:
        u,s,v = svds(M,r) 
        s = np.diag(s)
    return (u,s,v) 
Example 34
Project: pyh2nmf   Author: beedotkiran   File: pyh2nmf.py    GNU General Public License v3.0
def anls_entry_rank2_binary(left, right):
    """ Case where one entry in each column of H has to be equal to zero """
    n = right.shape[0]

    solve_either = np.zeros(shape=(n, 2))
    solve_either[:, 0] = np.maximum(0, np.divide(right[:, 0], left[0,0])) 
    solve_either[:, 1] = np.maximum(0, np.divide(right[:, 1], left[1,1])) 

    cosine_either = np.multiply(solve_either, repmat([np.sqrt(left[0,0]), np.sqrt(left[1,1])],n,1) )

    choose_first = (cosine_either[:, 0] >= cosine_either[:, 1])
    solve_either[choose_first, 1] = 0
    solve_either[~choose_first, 0] = 0
    return solve_either 
Example 35
Project: brainpipe   Author: EtienneCmb   File: _filtering.py    GNU General Public License v3.0
def fir1(N, Wn):
    # Variables definition :
    nbands = len(Wn) + 1
    ff = np.array((0, Wn[0], Wn[0], Wn[1], Wn[1], 1))

    f0 = np.mean(ff[2:4])
    L = N + 1

    mags = np.array(range(nbands)) % 2
    aa = np.ravel(repmat(mags, 2, 1), order='F')

    # Get filter coefficients :
    h = firls(L - 1, ff, aa)

    # Apply a window to coefficients :
    Wind = np.hamming(L)
    b = np.matrix(h.T * Wind)
    c = np.matrix(np.exp(-1j * 2 * np.pi * (f0 / 2) * np.array(range(L))))
    b = b / abs(c * b.T)

    return np.ndarray.squeeze(np.array(b)), 1


####################################################################
# - Filter the signal :
#################################################################### 
Example 36
Project: pyebm   Author: 88vikram   File: core_utilities.py    GNU General Public License v3.0
def patient_staging(pi0,event_centers,likeli_post,likeli_pre,type_staging):
    
    L_yes=np.divide(likeli_post,likeli_post+likeli_pre+1e-100)
    L_no = 1 - L_yes
    event_centers_pad=np.insert(event_centers,0,0)
    event_centers_pad=np.append(event_centers_pad,1)
    pk_s=np.diff(event_centers_pad)
    pk_s[:]=1;
    
    m=L_yes.shape
    prob_stage = np.zeros((m[0],m[1]+1))
    p_no_perm = L_no[:,pi0];
    p_yes_perm = L_yes[:,pi0];
    for j in range(m[1]+1):
        prob_stage[:,j]=pk_s[j]*np.multiply(np.nanprod(p_yes_perm[:,:j],axis=1),np.nanprod(p_no_perm[:,j:],axis=1))

    all_stages_rep2=matlib.repmat(event_centers_pad[:-1],m[0],1)
    
    if type_staging[0]=='exp':
        subj_stages = np.zeros(prob_stage.shape[0])
        for i in range(prob_stage.shape[0]):
            idx_nan=np.isnan(p_yes_perm[i,:])
            pr=prob_stage[i,1:]
            ev = event_centers_pad[1:-1]
            subj_stages[i]=np.mean(np.multiply(np.append(prob_stage[i,0],pr[~idx_nan]),np.append(event_centers_pad[0],ev[~idx_nan])))/np.mean(np.append(prob_stage[i,0],pr[~idx_nan]))
    elif type_staging[0]=='ml':
        subj_stages=np.argmax(prob_stage,axis=1)
    
    return subj_stages 
Example 37
Project: pylot   Author: erdos-project   File: utils.py    Apache License 2.0
def depth_to_local_point_cloud(depth_frame, width, height, fov, max_depth=0.9):
    """
    Convert a CARLA-encoded depth-map to a 2D array containing
    the 3D position (relative to the camera) of each pixel.
    "max_depth" is used to omit the points that are far enough.

    Args:
        depth_frame: the normalized depth frame
        width: frame width
        height: frame height
        fov: camera field of view
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_frame
    intrinsic_mat = create_intrinsic_matrix(width, height, fov)
    # 2d pixel coordinates
    pixel_length = width * height
    u_coord = repmat(np.r_[0:width:1], height, 1).reshape(pixel_length)
    v_coord = repmat(np.c_[0:height:1], 1, width).reshape(pixel_length)
    normalized_depth = np.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = np.where(normalized_depth > max_depth)
    normalized_depth = np.delete(normalized_depth, max_depth_indexes)
    u_coord = np.delete(u_coord, max_depth_indexes)
    v_coord = np.delete(v_coord, max_depth_indexes)

    # p2d = [u,v,1]
    p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = np.dot(inv(intrinsic_mat), p2d)
    p3d *= normalized_depth * far

    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    # Return the points as location,
    locations = [
        Location(x, y, z) for x, y, z in np.asarray(np.transpose(p3d))
    ]
    return locations 
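A tiny standalone sketch (with made-up frame dimensions) of the coordinate-grid trick used above: repmat over a row and a column of indices yields, after flattening, the per-pixel u and v coordinate lists, equivalent to np.meshgrid.

import numpy as np
from numpy.matlib import repmat

width, height = 3, 2
pixel_length = width * height
u_coord = repmat(np.r_[0:width:1], height, 1).reshape(pixel_length)
v_coord = repmat(np.c_[0:height:1], 1, width).reshape(pixel_length)
print(u_coord)   # [0 1 2 0 1 2]
print(v_coord)   # [0 0 0 1 1 1]
uu, vv = np.meshgrid(np.arange(width), np.arange(height))
print(np.array_equal(u_coord, uu.ravel()),
      np.array_equal(v_coord, vv.ravel()))   # True True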
Example 38
Project: claude   Author: Rassibassi   File: SSFstatic.py    MIT License
def randomizeSteps(stepSizes, spanLength, nSpans, sigma=0.01):
	stepSizes = matlib.repmat(stepSizes, nSpans, 1)
	stepSizes = stepSizes + np.random.normal(0, sigma*stepSizes[0,0], size=stepSizes.shape)
	stepSizes[:,-1] = stepSizes[:,-1] - (np.sum( stepSizes, axis=1 ) - spanLength)
	return stepSizes 
Example 39
Project: Neural_Network_VAD   Author: uarif1   File: fxpefac.py    GNU General Public License v3.0
def smooth(x, n):
    # snx = x.shape[1]
    nf = x.shape[0]
    c = np.cumsum(x, 1)
    y = np.hstack((
        c[:, np.arange(0, n+1, 2)]/repmat(np.arange(1, n+1, 2), nf, 1),
        (c[:, n:]-c[:, :-n])/n,
        (repmat(c[:, -1], 1, int(np.floor(n/2)))-c[:, np.arange(-1-n+2, -1, 2)])
        / repmat(np.arange(n-2, 0, -2), nf, 1)))
    return y 
Example 40
Project: Neural_Network_VAD   Author: uarif1   File: fxpefac.py    GNU General Public License v3.0
def timesm(x, n):

    if not np.mod(n, 2):
        n += 1
    nx = x.shape[1]
    # nf = x.shape[0]
    c = np.cumsum(x, 0)
    n = int(n)
    mid = int(np.round(n/2))
    y = np.vstack((c[mid:n, :]/repmat(np.arange(mid+1, n+1)[np.newaxis].T, 1, nx),
                   (c[n:, :]-c[: -n, :])/n,
                   (repmat(c[-1, :], mid, 1) - c[-1-n+1: -1-mid, :])
                   / repmat(np.arange(n-1, mid, -1)[np.newaxis].T, 1, nx)))

    return y 
Example 41
Project: Neural_Network_VAD   Author: uarif1   File: rdct.py    GNU General Public License v3.0
def rdct(x, n=None, a=None, b=1):
    '''
    RDCT     Discrete cosine transform of real data Y=(X,N,A,B)
    Data is truncated/padded to length N.

    This routine is equivalent to multiplying by the matrix

    rdct(eye(n)) = diag([sqrt(2)*B/A repmat(2/A,1,n-1)]) * cos((0:n-1)'*(0.5:n)*pi/n)

    Default values of the scaling factors are A=sqrt(2N) and B=1 which
    results in an orthogonal matrix. Other common values are A=1 or N and/or B=1 or sqrt(2).
    If b~=1 then the columns are no longer orthogonal.

    '''

    m, k = x.shape
    if n is None:
        n = m
    if a is None:
        a = np.sqrt(2*n)
    if n > m:
        x = np.vstack((x, np.zeros((n-m, k))))
    elif n < m:
        x = x[:n, :]

    x = np.vstack((x[np.arange(0, n, 2), :],
                   x[np.arange(2*np.fix(n/2)-1, 0, -2).astype(int), :]))
    z = np.concatenate(([np.sqrt(2)],
                        2*np.exp((-0.5*1j*np.pi/n)*np.arange(1, n))))[np.newaxis].T

    y = np.real(fft(x, axis=0)*repmat(z, 1, k))/a
    y[0, :] = y[0, :]*b
    return y 
Example 42
Project: Neural_Network_VAD   Author: uarif1   File: enframe.py    GNU General Public License v3.0
def enframe(s, win, hop):
    '''
    ENFRAME split signal up into (overlapping) frames: one per row.

    Parameters
    ----------
    s : np.array
        speech signal

    win : np.array
        window

    hop : int
        frame increment in samples

    Returns
    -------
    tuple

        f :   enframed data - one frame per row

        t :   fractional time in samples at the centre of each frame
              with the first sample being 1.

    '''
    nx = len(s)
    lw = len(win)  # lw
    nli = nx-lw+hop
    nf = int(max(np.fix(nli/hop), 0))  # number of full frames
    f = np.zeros((nf, lw))
    indf = hop*np.arange(nf)
    inds = np.arange(lw).astype(int)

    f = s[repmat(indf, lw, 1).T.astype(int) + repmat(inds, nf, 1)]
    f = f*repmat(win, nf, 1)
    t0 = (1+lw)/2
    t = t0+hop*np.array(range(nf))
    return f, t 
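A toy illustration (signal and frame sizes invented here) of the indexing trick enframe uses: repmat combines frame-start offsets with within-frame offsets into one index matrix, and fancy indexing then extracts all frames at once, one per row.

import numpy as np
from numpy.matlib import repmat

s = np.arange(10.0)           # toy signal
lw, hop, nf = 4, 2, 4         # window length, hop, number of full frames
indf = hop * np.arange(nf)    # frame start samples: [0 2 4 6]
inds = np.arange(lw)          # within-frame offsets: [0 1 2 3]
idx = repmat(indf, lw, 1).T.astype(int) + repmat(inds, nf, 1)
print(idx)
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]
#  [6 7 8 9]]
print(s[idx])                 # one frame per row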
Example 43
Project: renewable-timeseries   Author: tum-ens   File: util.py    GNU General Public License v3.0
def resizem(A_in, row_new, col_new):
    """
    This function resizes a regular data grid by copying and pasting parts of the original array.

    :param A_in: Input matrix.
    :type A_in: numpy array
    :param row_new: New number of rows.
    :type row_new: integer
    :param col_new: New number of columns.
    :type col_new: integer

    :return A_out: Resized matrix.
    :rtype: numpy array
    """
    row_rep = row_new // np.shape(A_in)[0]
    col_rep = col_new // np.shape(A_in)[1]
    A_inf = A_in.flatten(order="F")[np.newaxis]
    A_out = reshape(
        repmat(
            reshape(reshape(repmat((A_in.flatten(order="F")[np.newaxis]), row_rep, 1), (row_new, -1), order="F").T, (-1, 1), order="F"), 1, col_rep
        ).T,
        (col_new, row_new),
        order="F",
    ).T

    return A_out 
Example 44
Project: Structured3D   Author: bertjiazheng   File: panorama.py    MIT License
def lineFromTwoPoint(pt1, pt2):
    """
    Generate line segment based on two points on panorama
    pt1, pt2: two points on panorama
    line:
        1~3-th dim: normal of the line
        4-th dim: the projection dimension ID
        5~6-th dim: the u of line segment endpoints in projection plane
    """
    numLine = pt1.shape[0]
    lines = np.zeros((numLine, 6))
    n = np.cross(pt1, pt2)
    n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)
    lines[:, 0:3] = n

    areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))
    areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))
    areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))
    planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1
    lines[:, 3] = planeIDs

    for i in range(numLine):
        uv = xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])
        umax = uv[:, 0].max() + np.pi
        umin = uv[:, 0].min() + np.pi
        if umax - umin > np.pi:
            lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
        else:
            lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi

    return lines 
Example 45
Project: dasp   Author: agb32   File: photosensor.py    GNU Affero General Public License v3.0
def ccd_FPN_models(self, sensor_signal_rows, sensor_signal_columns, \
                        noisetype, noisedistribution, noise_params):
        noiseout=[]
        if noisedistribution=='AR-ElGamal':
            if operator.eq(noisetype,'pixel')==0:
                x2=numpy.random.randn(sensor_signal_rows,sensor_signal_columns)
                noiseout=signal.lfilter([1,0],noise_params,x2)
            elif operator.eq(noisetype, 'column')==0:
                x=signal.lfilter([1,0],noise_params,numpy.random.randn(1,sensor_signal_columns))
                noiseout=repmat(x,sensor_signal_rows,1)
        elif noisedistribution=='Janesick-Gaussian':
            if operator.eq(noisetype, 'pixel')==0:
                noiseout =numpy.random.randn(sensor_signal_rows,sensor_signal_columns)
            elif operator.eq(noisetype, 'column')==0:
                x=numpy.random.randn(1,sensor_signal_columns)
                noiseout = repmat(x,sensor_signal_rows,1)
            elif operator.eq(noisetype, 'row')==0:
                x=numpy.random.randn(sensor_signal_rows,1)
                noiseout = repmat(x,1,sensor_signal_columns)
        elif noisedistribution=='Wald':
            if operator.eq(noisetype,'pixel')==0:
                noiseout = tool_rand_distributions_generator\
                ('wald',noise_params[0],[sensor_signal_rows, sensor_signal_columns])\
                +numpy.random.rand(sensor_signal_rows, sensor_signal_columns)
            elif operator.eq(noisetype, 'column')==0:
                x = tool_rand_distributions_generator\
                ('lognorm',[noise_params[0],noise_params[1]],[1, sensor_signal_columns])
                noiseout = repmat(x,sensor_signal_rows,1)
        return noiseout
        
        #Define light FPN 
Example 46
Project: esmlab   Author: NCAR   File: conftest.py    Apache License 2.0
def xr_ds_ex(decode_times=True, nyrs=3, var_const=True):
    """return an example xarray.Dataset object, useful for testing functions"""

    # set up values for Dataset: nyrs years of analytic monthly values
    days_1yr = np.array([31.0, 28.0, 31.0, 30.0, 31.0, 30.0, 31.0, 31.0, 30.0, 31.0, 30.0, 31.0])
    time_edges = np.insert(np.cumsum(npm.repmat(days_1yr, nyrs, 1)), 0, 0)
    time_bounds_vals = np.stack((time_edges[:-1], time_edges[1:]), axis=1)
    time_vals = np.mean(time_bounds_vals, axis=1)
    time_vals_yr = time_vals / 365.0
    if var_const:
        var_vals = np.ones_like(time_vals_yr)
    else:
        var_vals = np.sin(np.pi * time_vals_yr) * np.exp(-0.1 * time_vals_yr)

    time_units = 'days since 0001-01-01'
    calendar = 'noleap'

    if decode_times:
        time_vals = cftime.num2date(time_vals, time_units, calendar)
        time_bounds_vals = cftime.num2date(time_bounds_vals, time_units, calendar)

    # create Dataset, including time_bounds
    time_var = xr.DataArray(
        time_vals,
        name='time',
        dims='time',
        coords={'time': time_vals},
        attrs={'bounds': 'time_bounds'},
    )
    if not decode_times:
        time_var.attrs['units'] = time_units
        time_var.attrs['calendar'] = calendar
    time_bounds = xr.DataArray(
        time_bounds_vals, name='time_bounds', dims=('time', 'd2'), coords={'time': time_var}
    )
    var = xr.DataArray(var_vals, name='var_ex', dims='time', coords={'time': time_var})
    ds = var.to_dataset()
    ds = xr.merge((ds, time_bounds))

    if decode_times:
        ds.time.encoding['units'] = time_units
        ds.time.encoding['calendar'] = calendar

    return ds 
Example 47
Project: Conditional-Affordance-Learning   Author: niladridutt   File: image_converter.py    MIT License
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing CARLA encoded depth-map to a 2D array containing
    the 3D position (relative to the camera) of each pixel and its corresponding
    RGB color of an array.
    "max_depth" is used to omit the points that are far enough.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # pd2 = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 
Example 48
Project: safe-exploration   Author: befelix   File: sampling_models.py    MIT License
def sample_n_step(self, x0, K, k, n=1, n_samples=1000):
        """ Sample from the distribution of the GP system of n steps



        Args:
            x0 (numpy.ndarray[float]): vector of shape n_s x 1 representing the
                initial (deterministic) state of the system
            K (numpy.ndarray[float]): array of shape n x n_u x n_s; the state feedback
                linear controller per time step.
            k (numpy.ndarray[float]): array of shape n x n_u; the feed-forward
                control modifications per time step.
            n (int, optional): number of time steps to propagate forward
            n_samples (int, optional): number of samples to propagate through
                the system.
        Returns:
            S (numpy.ndarray[float]): n_samples x n_s samples from the pdf of
                the n-step ahead propagation of the system
            S_all (numpy.ndarray[float]): n x n_samples x n_s samples for all
                intermediate states of the system in the n-step ahead propagation
                of the system.

        """

        assert n > 0, "The time horizon n for the multi-step sampling must be positive!"
        assert np.shape(K) == (
            n, self.n_u, self.n_s), "Required shape of K is ({},{},{})".format(n,
                                                                               self.n_u,
                                                                               self.n_s)
        assert np.shape(k) == (n, self.n_u), "Required shape of k is ({},{})".format(n,
                                                                                     self.n_u)

        S_all = np.empty((n, n_samples, self.n_s))  # The samples for each time step

        # Get samples from the predictive distribution for the first time step
        u0 = np.dot(K[0], x0) + k[0, :, None]
        inp0 = np.vstack((x0, u0)).T

        S = self.GP.sample_from_gp(inp0, size=n_samples).reshape((n_samples, self.n_s))
        S_all[0] = S

        # Sample from other time steps (if n > 1).
        for i in range(1, n):
            U = np.dot(S, K[i].T) + repmat(k[i, :, None], n_samples, 1)
            inp = np.hstack((S, U))
            S = self.GP.sample_from_gp(inp, size=1).reshape((n_samples, self.n_s))
            S_all[i] = S

        return S.squeeze(), S_all 
Example 49
Project: masif   Author: LPDI-EPFL   File: compute_normal.py    Apache License 2.0
def compute_normal(vertex, face):

    """
    compute_normal - compute the normal of a triangulation
    vertex: 3xn matrix of vertices
    face: 3xm matrix of face indices.
    
      normal,normalf = compute_normal(vertex,face)
    
      normal(i,:) is the normal at vertex i.
      normalf(j,:) is the normal at face j.
    
    Copyright (c) 2004 Gabriel Peyré
    Converted to Python by Pablo Gainza LPDI EPFL 2017  
    """

    vertex = vertex.T
    face = face.T
    nface = np.size(face, 1)
    nvert = np.size(vertex, 1)
    normal = np.zeros((3, nvert))
    # unit normals to the faces
    normalf = crossp(
        vertex[:, face[1, :]] - vertex[:, face[0, :]],
        vertex[:, face[2, :]] - vertex[:, face[0, :]],
    )
    sum_squares = np.sum(normalf ** 2, 0)
    d = np.sqrt(sum_squares)
    d[d < eps] = 1
    normalf = normalf / repmat(d, 3, 1)
    # unit normal to the vertex
    normal = np.zeros((3, nvert))
    for i in np.arange(0, nface):
        f = face[:, i]
        for j in np.arange(3):
            normal[:, f[j]] = normal[:, f[j]] + normalf[:, i]

    # normalize
    d = np.sqrt(np.sum(normal ** 2, 0))
    d[d < eps] = 1
    normal = normal / repmat(d, 3, 1)
    # enforce that the normal are outward
    vertex_means = np.mean(vertex, 0)
    v = vertex - repmat(vertex_means, 3, 1)
    s = np.sum(np.multiply(v, normal), 1)
    if np.sum(s > 0) < np.sum(s < 0):
        # flip
        normal = -normal
        normalf = -normalf
    return normal.T 
Example 50
Project: coiltraine   Author: felipecode   File: image_converter.py    MIT License
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing CARLA encoded depth-map to a 2D array containing
    the 3D position (relative to the camera) of each pixel and its corresponding
    RGB color of an array.
    "max_depth" is used to omit the points that are far enough.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # p2d = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 
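The pixel-grid construction above is the part that uses repmat: column indices are tiled down the image rows and row indices across the columns before back-projection through the inverse intrinsic matrix. A tiny self-contained sketch with made-up image dimensions and a constant fake depth:

import math
import numpy
from numpy.matlib import repmat

width, height, fov = 4, 3, 90.0
k = numpy.identity(3)
k[0, 2] = width / 2.0
k[1, 2] = height / 2.0
k[0, 0] = k[1, 1] = width / (2.0 * math.tan(fov * math.pi / 360.0))

pixel_length = width * height
u_coord = repmat(numpy.r_[width - 1:-1:-1], height, 1).reshape(pixel_length)
v_coord = repmat(numpy.c_[height - 1:-1:-1], 1, width).reshape(pixel_length)

normalized_depth = numpy.full(pixel_length, 0.01)     # fake depth in [0, 1]
p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])
p3d = numpy.dot(numpy.linalg.inv(k), p2d) * normalized_depth * 1000.0
print(p3d.shape)                                      # (3, 12)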
Example 51
Project: pywim2   Author: lavaner   File: channel.py    GNU General Public License v3.0 4 votes vote down vote up
def filter(self, input, delay=0):
        """Filter the input signal with specified model

        Parameters
        ----------
        input : 2-dimensional array of int Shape of 
            tx antenna element x number of samples
        
        See Also 
        --------

        Notes
        -----

        Examples
        --------
        """

        coef = self.gen_coef()
        
        num_tx_ele, num_rx_ele, num_path, num_sample = coef.shape
        
        output = np.zeros((num_rx_ele, num_sample))

        delay_buffer = np.zeros((num_path, num_sample+np.max(delay)))
        for s in range(num_rx_ele):
            cluster_buffer = np.zeros((num_tx_ele, num_sample))

            for u in range(num_tx_ele):
                input_temp = matlib.repmat(input[u,:], num_path, 1)

                coef_matrix = coef[u,s,:,:]
                
                # multiple coef
                output_temp = input_temp*coef_matrix
      
                # delay
                for p in range(num_path):
                    delay_buffer[p, 
                                 delay[p]:delay[p]+num_sample] = output_temp[p, :]        

                cluster_buffer[u, :] = delay_buffer[:,:num_sample].sum(0) 

            # multipath combine
            output[s, :] = cluster_buffer.sum(0)

        return output 
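The inner loop above copies one transmit antenna's samples onto every propagation path with repmat so the per-path coefficients can be applied element-wise. A standalone sketch with made-up sizes:

import numpy as np
from numpy import matlib

num_path, num_sample = 3, 5
signal = np.arange(num_sample, dtype=float)          # one tx antenna's samples
coef_matrix = np.random.rand(num_path, num_sample)   # per-path coefficients

input_temp = matlib.repmat(signal, num_path, 1)      # (num_path, num_sample)
output_temp = input_temp * coef_matrix               # faded copy of the signal per path
print(output_temp.shape)                             # (3, 5)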
Example 52
Project: pyh2nmf   Author: beedotkiran   File: pyh2nmf.py    GNU General Public License v3.0 4 votes vote down vote up
def splitclust(M,algo=1): 
    """
    Given a matrix M, split its columns into two subsets

    See Section 3 in 
    Gillis, Kuang, Park, `Hierarchical Clustering of Hyperspectral Images 
    using Rank-Two Nonnegative Matrix Factorization', arXiv. 
    ****** Input ******
     M     : m-by-n data matrix (or a H-by-L-by-m tensor) 
     algo  : algorithm used to split the clusters
             1. rank-two NMF (default)
             2. k-means
             3. spherical k-means
    ****** Output ******
      K    : two clusters 
      U    : corresponding centroids
      s    : first singular value of M
    """
    
    if algo == 1:  # rank-2 NMF
        U,V,s = rank2nmf(M)
        # Normalize columns of V to sum to one
        V = np.multiply(V,repmat( (sum(V)+1e-16)**(-1), 2,1)) 
        x = V[0,:].T 
        # Compute threshold to split the cluster
        threshold,_,_ = fquad(x) 
        K = {} #children dictionary
        K[1] = np.where(x >= threshold) 
        K[2] = np.where(x < threshold) 
        
    elif algo == 2: # k-means
        [u,s,v] = fastsvds(M,2) # Initialization: SVD+SPA
        Kf = FastSepNMF(s*v.T,2,0)
        U0 = u*s*v[Kf,:].T 

        IDX,U = KMeans(M.T, 2, 'EmptyAction','singleton','Start',U0.T)
        U = U.T 
        K = {} #children dictionary
        K[1] = np.where(IDX==1) 
        K[2] = np.where(IDX==2)
        s = s[1]
        
    elif algo == 3: # spherical k-means
        u,s,v = fastsvds(M,2) # Initialization: SVD+SPA 
        Kf = FastSepNMF(s*v.T,2,0)
        U0 = u*s*v[Kf,:].T 
        
        IDX,U = spkmeans(M, U0) 
        # or (?)
        #[IDX,U] = kmeans(M', 2, 'EmptyAction','singleton','Start',U0','Distance','cosine'): 
        K = {} #children dictionary
        K[1] = np.where(IDX==1) 
        K[2] = np.where(IDX==2) 
        s = s[1]
    
    return (K,U,s) 
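In the rank-2 NMF branch, repmat is used to rescale each column of V so that its entries sum to one. A self-contained sketch of just that normalization:

import numpy as np
from numpy.matlib import repmat

V = np.array([[1.0, 3.0, 0.0],
              [1.0, 1.0, 2.0]])                      # 2 x n
V = np.multiply(V, repmat((V.sum(0) + 1e-16) ** (-1), 2, 1))
print(V.sum(0))                                      # ~[1. 1. 1.]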
Example 53
Project: pyh2nmf   Author: beedotkiran   File: pyh2nmf.py    GNU General Public License v3.0 4 votes vote down vote up
def spkmeans(X, init):
        
    """
    Perform spherical k-means clustering.
    X: d x n data matrix
    init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
    Reference: Clustering on the Unit Hypersphere using Von Mises-Fisher Distributions.
    by A. Banerjee, I. Dhillon, J. Ghosh and S. Sra.
    Written by Michael Chen ([email protected]).
    Based on matlab version @ 
    http://www.mathworks.com/matlabcentral/fileexchange/28902-spherical-k-means/content/spkmeans.m 
    (and slightly modifed to run on previous verions of Matlab)
    initialization
    """
    d,n = X.shape

    if np.isscalar(init) and n <= init:
        # fewer points than clusters: every point is its own cluster
        label = np.arange(n)
        m = X
        energy = 0
    else:
        # Normalize the columns of X to unit Euclidean norm
        X = np.multiply(X, repmat((np.sum(X ** 2, 0) + 1e-16) ** (-0.5), d, 1))

        if np.isscalar(init):
            idx = randsample(n, init)
            m = X[:, idx]
            label = np.argmax(np.dot(m.T, X), axis=0)
        elif init.shape[0] == 1 and init.shape[1] == n:
            label = init
        elif init.shape[0] == d:
            m = np.multiply(init, repmat((np.sum(init ** 2, 0) + 1e-16) ** (-0.5), d, 1))
            label = np.argmax(np.dot(m.T, X), axis=0)
        else:
            raise ValueError('init is not valid.')

        # main algorithm: final version
        last = 0
        while (label != last).any():
            u, label = np.unique(label, return_inverse=True)   # remove empty clusters
            k = len(u)
            # n x k indicator matrix with E[i, label[i]] = 1
            E = sparse.coo_matrix((np.ones(n), (np.arange(n), label)), shape=(n, k))
            m = np.dot(X, E.toarray())
            m = np.multiply(m, repmat((np.sum(m ** 2, 0) + 1e-16) ** (-0.5), d, 1))
            last = label
            val = np.max(np.dot(m.T, X), axis=0)
            label = np.argmax(np.dot(m.T, X), axis=0)
        u, label = np.unique(label, return_inverse=True)   # remove empty clusters
        energy = np.sum(val)
    return (label, m, energy) 
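A self-contained sketch of one spherical k-means update as in the loop above: build a cluster-indicator matrix, sum member columns into centroids, and renormalize the centroids column-wise with repmat (toy data; the sparse construction mirrors the loop body):

import numpy as np
from numpy.matlib import repmat
from scipy import sparse

d, n, k = 2, 6, 2
X = np.random.rand(d, n)
X = np.multiply(X, repmat((np.sum(X ** 2, 0) + 1e-16) ** (-0.5), d, 1))
label = np.array([0, 0, 0, 1, 1, 1])

E = sparse.coo_matrix((np.ones(n), (np.arange(n), label)), shape=(n, k))
m = np.dot(X, E.toarray())                               # sum of member columns per cluster
m = np.multiply(m, repmat((np.sum(m ** 2, 0) + 1e-16) ** (-0.5), d, 1))
label = np.argmax(np.dot(m.T, X), axis=0)                # reassign each point
print(label)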
Example 54
Project: CARLA-Segmentation-dataset   Author: hmtai6   File: image_converter.py    GNU General Public License v3.0 4 votes vote down vote up
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing a CARLA-encoded depth map to a 2D array
    containing the 3D position (relative to the camera) of each pixel and,
    optionally, its corresponding RGB color.
    "max_depth" is used to omit points that are too far away.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # p2d = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 
Example 55
Project: bdpy   Author: KamitaniLab   File: preprocessor.py    MIT License 4 votes vote down vote up
def proc(self, x, ind, opt):
        std = opt['std']                      # Bool
        maxmin = opt['maxmin']                # Bool
        dim = opt['dimension']                # int
        n_iter = opt['n_iter']                # int
        std_threshold = opt['std_threshold']  # float
        max_value = opt['max_value']          # float
        min_value = opt['min_value']          # float

        # TODO: add remove operation

        dim = dim - 1

        y = copy.deepcopy(x)

        if std:
            # Reduce outliers by SD
            for i in range(n_iter):
                mu = np.mean(y, axis=dim)
                sd = np.std(y, axis=dim)

                thres_up = mu + sd * std_threshold
                thres_lw = mu - sd * std_threshold

                if dim == 0:
                    thres_up = repmat(thres_up, y.shape[0], 1)
                    thres_lw = repmat(thres_lw, y.shape[0], 1)
                elif dim == 1:
                    thres_up = repmat(np.c_[thres_up], 1, y.shape[1])
                    thres_lw = repmat(np.c_[thres_lw], 1, y.shape[1])

                out_ind_up = thres_up < y
                out_ind_lw = thres_lw > y

                # Clip outliers
                y[out_ind_up] = thres_up[out_ind_up]
                y[out_ind_lw] = thres_lw[out_ind_lw]

            num_out = np.sum(out_ind_up) + np.sum(out_ind_lw)
            print('Num outliers (SD): %d (%f %%)' % (num_out, 100.0 * num_out / y.size))

        if maxmin:
            # Reduce outliers by max-min values
            if max_value is not None:
                out_ind_max = y > max_value
                y[out_ind_max] = max_value
            if min_value is not None:
                out_ind_min = y < min_value
                y[out_ind_min] = min_value

        ind_map = ind

        return y, ind_map 
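When dim == 0, the thresholds above are per-column statistics tiled down the rows with repmat before clipping. A standalone sketch of the same effect, where np.clip stands in for the boolean-mask assignment:

import numpy as np
from numpy.matlib import repmat

y = np.array([[0.0, 10.0],
              [1.0,  9.0],
              [50.0, 8.0]])                          # samples x features
std_threshold = 1
mu, sd = np.mean(y, axis=0), np.std(y, axis=0)
thres_up = repmat(mu + sd * std_threshold, y.shape[0], 1)   # tiled upper bound
thres_lw = repmat(mu - sd * std_threshold, y.shape[0], 1)   # tiled lower bound
y = np.clip(y, thres_lw, thres_up)                   # the 50.0 gets clipped to mu + sd
print(y[:, 0])                                       # last entry is now ~40.3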
Example 56
Project: gr-localization   Author: rwth-ti   File: transmitter_flowgraph.py    MIT License 4 votes vote down vote up
def __init__(self, options):
        gr.top_block.__init__(self)
        ##################################################
        # Variables
        ##################################################
        self.samp_rate = samp_rate = 10000000
        self.f = f = 2510000000

        ##################################################
        # Blocks
        ##################################################
        self.uhd_usrp_sink_0 = uhd.usrp_sink(
        	",".join(("", "")),
        	uhd.stream_args(
        		cpu_format="fc32",
        		channels=range(1),
        	),
        )
        self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
        self.uhd_usrp_sink_0.set_center_freq(2510000000, 0)
        self.uhd_usrp_sink_0.set_gain(90, 0)
        self.blocks_float_to_complex_0 = blocks.float_to_complex(1)

        mcode = True
        nbits = 11
        if mcode == True:
            v = []
            for i in mls.mls(nbits): v.append(int(i))
            v = np.array(v)
        else:
            v = np.random.randint(0,2,np.power(nbits,2))
        z = np.zeros(np.power(nbits,2))
        v = v*2-1

        #v = nm.repmat(v,10,1).flatten("F")
        #z = nm.repmat(z,1,,1).flatten("F")

        self.blocks_vector_source_x_0 = blocks.vector_source_f((v), True, 1, [])
        self.blocks_vector_source_x_1 = blocks.vector_source_f((z), True, 1, [])

        ##################################################
        # Connections
        ##################################################
        self.connect((self.blocks_vector_source_x_0, 0), (self.blocks_float_to_complex_0, 0))    
        self.connect((self.blocks_vector_source_x_1, 0), (self.blocks_float_to_complex_0, 1))    
        self.connect((self.blocks_float_to_complex_0, 0), (self.uhd_usrp_sink_0, 0)) 
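The commented-out lines hint at repeating the code sequence with repmat. A standalone sketch of what that pattern produces, assuming the nm alias stands for numpy.matlib: repmat followed by a column-major flatten repeats each chip ten times back-to-back.

import numpy as np
from numpy import matlib as nm

v = np.array([1, -1, 1, 1, -1])            # a short +/-1 code
rep = nm.repmat(v, 10, 1).flatten("F")     # each chip repeated 10 times in a row
print(rep.shape)                           # (50,)
print(rep[:12])                            # [ 1  1  1  1  1  1  1  1  1  1 -1 -1]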
Example 57
Project: Hands-On-Intelligent-Agents-with-OpenAI-Gym   Author: PacktPublishing   File: image_converter.py    MIT License 4 votes vote down vote up
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing a CARLA-encoded depth map to a 2D array
    containing the 3D position (relative to the camera) of each pixel and,
    optionally, its corresponding RGB color.
    "max_depth" is used to omit points that are too far away.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # p2d = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 
Example 58
Project: Hands-On-Intelligent-Agents-with-OpenAI-Gym   Author: PacktPublishing   File: image_converter.py    MIT License 4 votes vote down vote up
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing a CARLA-encoded depth map to a 2D array
    containing the 3D position (relative to the camera) of each pixel and,
    optionally, its corresponding RGB color.
    "max_depth" is used to omit points that are too far away.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # p2d = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d)) 
Example 59
Project: CAL   Author: xl-sr   File: image_converter.py    MIT License 4 votes vote down vote up
def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
    """
    Convert an image containing a CARLA-encoded depth map to a 2D array
    containing the 3D position (relative to the camera) of each pixel and,
    optionally, its corresponding RGB color.
    "max_depth" is used to omit points that are too far away.
    """
    far = 1000.0  # max depth in meters.
    normalized_depth = depth_to_array(image)

    # (Intrinsic) K Matrix
    k = numpy.identity(3)
    k[0, 2] = image.width / 2.0
    k[1, 2] = image.height / 2.0
    k[0, 0] = k[1, 1] = image.width / \
        (2.0 * math.tan(image.fov * math.pi / 360.0))

    # 2d pixel coordinates
    pixel_length = image.width * image.height
    u_coord = repmat(numpy.r_[image.width-1:-1:-1],
                     image.height, 1).reshape(pixel_length)
    v_coord = repmat(numpy.c_[image.height-1:-1:-1],
                     1, image.width).reshape(pixel_length)
    if color is not None:
        color = color.reshape(pixel_length, 3)
    normalized_depth = numpy.reshape(normalized_depth, pixel_length)

    # Search for pixels where the depth is greater than max_depth to
    # delete them
    max_depth_indexes = numpy.where(normalized_depth > max_depth)
    normalized_depth = numpy.delete(normalized_depth, max_depth_indexes)
    u_coord = numpy.delete(u_coord, max_depth_indexes)
    v_coord = numpy.delete(v_coord, max_depth_indexes)
    if color is not None:
        color = numpy.delete(color, max_depth_indexes, axis=0)

    # p2d = [u,v,1]
    p2d = numpy.array([u_coord, v_coord, numpy.ones_like(u_coord)])

    # P = [X,Y,Z]
    p3d = numpy.dot(numpy.linalg.inv(k), p2d)
    p3d *= normalized_depth * far

    # Formatting the output to:
    # [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
    if color is not None:
        # numpy.concatenate((numpy.transpose(p3d), color), axis=1)
        return sensor.PointCloud(
            image.frame_number,
            numpy.transpose(p3d),
            color_array=color)
    # [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
    return sensor.PointCloud(image.frame_number, numpy.transpose(p3d))