Python numpy.triu_indices() Examples

The following are code examples of numpy.triu_indices(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all other available functions and classes of the numpy module.
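As a quick refresher, numpy.triu_indices(n, k=0) returns a pair of index arrays addressing the upper-triangular part of an (n, n) array, offset k diagonals above the main one. A minimal sketch:

import numpy as np

a = np.arange(16).reshape(4, 4)

# Indices of the upper triangle, excluding the main diagonal (k=1)
rows, cols = np.triu_indices(4, k=1)
print(rows)            # [0 0 0 1 1 2]
print(cols)            # [1 2 3 2 3 3]
print(a[rows, cols])   # [ 1  2  3  6  7 11]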
Example #1
Source File: multicomp.py    From vnpy_crypto with MIT License
def compare_ordered(vals, alpha):
    '''simple ordered sequential comparison of means

    vals : array_like
        means or rankmeans for independent groups

    incomplete, no return, not used yet
    '''
    vals = np.asarray(vals)
    alphaf = alpha  # Notation ?
    sortind = np.argsort(vals)
    pvals = vals[sortind]
    sortrevind = sortind.argsort()
    ntests = len(vals)
    #alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
    #alphacBonf = alphaf / float(ntests)
    v1, v2 = np.triu_indices(ntests, 1)
    #v1,v2 have wrong sequence
    for i in range(4):
        for j in range(4,i, -1):
            print(i,j) 
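As the `#v1,v2 have wrong sequence` comment hints, np.triu_indices enumerates pairs row by row rather than widest-span first, which is the order a sequential step-down test (sketched by the print loop) would want. For illustration:

import numpy as np

v1, v2 = np.triu_indices(4, 1)
print(np.column_stack([v1, v2]))
# [[0 1]
#  [0 2]
#  [0 3]
#  [1 2]
#  [1 3]
#  [2 3]]
# A step-down comparison would visit (0, 3) first, as in the print loop above.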
Example #2
Source File: so_nelder_mead.py    From pymoo with Apache License 2.0
def _do_continue(self, algorithm):
        do_continue = self.default.do_continue(algorithm)

        # if the default says do not continue just follow that
        if not do_continue:
            return False

        # additionally check for degenerated simplex
        else:
            X = algorithm.pop.get("X")

            # degenerated simplex - get all edges and minimum and maximum length
            D = vectorized_cdist(X, X)
            val = D[np.triu_indices(len(X), 1)]
            min_e, max_e = val.min(), val.max()

            # either if the maximum length is very small or the ratio is degenerated
            is_degenerated = max_e < 1e-16 or min_e / max_e < 1e-16

            return not is_degenerated 
Example #3
Source File: control_.py    From kusanagi with MIT License
def predict(self, m, s=None, t=None):
        D = m.shape[0]
        if t is not None:
            self.t = t
        t = self.t

        u, z, I, L = self.u_nominal, self.z_nominal, self.I_, self.L_

        if s is None:
            s = np.zeros((D, D))

        # construct flattened state covariance vector
        z_t = tt_.concatenate([m.flatten(), s[self.triu_indices]])
        # compute control
        u_t = u[t] + I[t] + L[t].dot(z_t - z[t])

        # limit the controller output
        #u_t = tt.clip(u_t, -self.maxU,  self.maxU)

        U = u_t.shape[0]
        self.t += 1
        return u_t, tt_.zeros((U, U)), tt_.zeros((D, U)) 
Example #4
Source File: so_nelder_mead.py    From pymoo with Apache License 2.0
def _do_continue(self, algorithm):
        pop, problem = algorithm.pop, algorithm.problem

        X, F = pop.get("X", "F")

        ftol = np.abs(F[1:] - F[0]).max() <= self.ftol

        # if the problem has bounds we can normalize the x space to to be more accurate
        if problem.has_bounds():
            xtol = np.abs((X[1:] - X[0]) / (problem.xu - problem.xl)).max() <= self.xtol
        else:
            xtol = np.abs(X[1:] - X[0]).max() <= self.xtol

        # degenerated simplex - get all edges and minimum and maximum length
        D = vectorized_cdist(X, X)
        val = D[np.triu_indices(len(pop), 1)]
        min_e, max_e = val.min(), val.max()

        # either if the maximum length is very small or the ratio is degenerated
        is_degenerated = max_e < 1e-16 or min_e / max_e < 1e-16

        max_iter = algorithm.n_gen > self.n_max_iter
        max_evals = algorithm.evaluator.n_eval > self.n_max_evals

        return not (ftol or xtol or max_iter or max_evals or is_degenerated) 
Example #5
Source File: lu.py    From nsf with MIT License
def __init__(self, features, using_cache=False, identity_init=True, eps=1e-3):
        super().__init__(features, using_cache)

        self.eps = eps

        self.lower_indices = np.tril_indices(features, k=-1)
        self.upper_indices = np.triu_indices(features, k=1)
        self.diag_indices = np.diag_indices(features)

        n_triangular_entries = ((features - 1) * features) // 2

        self.lower_entries = nn.Parameter(torch.zeros(n_triangular_entries))
        self.upper_entries = nn.Parameter(torch.zeros(n_triangular_entries))
        self.unconstrained_upper_diag = nn.Parameter(torch.zeros(features))

        self._initialize(identity_init) 
Example #6
Source File: utils.py    From brainiak with Apache License 2.0
def from_tri_2_sym(tri, dim):
    """convert a upper triangular matrix in 1D format
       to 2D symmetric matrix


    Parameters
    ----------

    tri: 1D array
        Contains elements of upper triangular matrix

    dim : int
        The dimension of target matrix.


    Returns
    -------

    symm : 2D array
        Symmetric matrix in shape=[dim, dim]
    """
    symm = np.zeros((dim, dim))
    symm[np.triu_indices(dim)] = tri
    return symm 
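Note that despite its name, from_tri_2_sym only fills the upper triangle; the lower triangle remains zero. If a fully symmetric matrix is needed, one way to mirror it (a hedged sketch using the function above):

import numpy as np

dim = 3
tri = np.arange(1., 7.)           # dim * (dim + 1) / 2 = 6 upper-triangular entries
symm = from_tri_2_sym(tri, dim)   # upper triangle filled, lower triangle still zero

# Mirror the upper triangle downward without doubling the diagonal
full = symm + symm.T - np.diag(np.diag(symm))
print(np.allclose(full, full.T))  # True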
Example #7
Source File: utils.py    From MagnetLoss-PyTorch with MIT License
def compute_rand_index(emb, labels):
    """
    https://en.wikipedia.org/wiki/Rand_index
    """
    n = len(emb)
    k = np.unique(labels).size

    m = KMeans(k)
    m.fit(emb)
    emb_labels = m.predict(emb)

    agreements = 0
    for i, j in zip(*np.triu_indices(n, 1)):
        emb_same = emb_labels[i] == emb_labels[j]
        gt_same = labels[i] == labels[j]

        if emb_same == gt_same:
            agreements += 1

    return float(agreements) / (n * (n-1) / 2) 
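A small usage sketch, assuming scikit-learn is installed (the snippet already relies on KMeans); the toy data here is hypothetical:

from sklearn.datasets import make_blobs

emb, labels = make_blobs(n_samples=60, centers=3, random_state=0)
ri = compute_rand_index(emb, labels)
print(0.0 <= ri <= 1.0)  # True; well-separated blobs should score close to 1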
Example #8
Source File: akita_scd.py    From basenji with Apache License 2.0
def ut_dense(preds_ut, diagonal_offset):
  """Construct dense prediction matrix from upper triangular."""
  ut_len, num_targets = preds_ut.shape

  # infer original sequence length
  seq_len = int(np.sqrt(2*ut_len + 0.25) - 0.5)
  seq_len += diagonal_offset

  # get triu indexes
  ut_indexes = np.triu_indices(seq_len, diagonal_offset)
  assert(len(ut_indexes[0]) == ut_len)

  # assign to dense matrix
  preds_dense = np.zeros(shape=(seq_len,seq_len,num_targets), dtype=preds_ut.dtype)
  preds_dense[ut_indexes] = preds_ut

  # symmetrize
  preds_dense += np.transpose(preds_dense, axes=[1,0,2])

  return preds_dense 
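The seq_len inference inverts ut_len = s * (s + 1) / 2 with s = seq_len - diagonal_offset, giving s = sqrt(2 * ut_len + 1/4) - 1/2. A quick round-trip check of the function above:

import numpy as np

seq_len, offset, num_targets = 8, 2, 1
dense = np.random.rand(seq_len, seq_len, num_targets)
ut = dense[np.triu_indices(seq_len, offset)]  # flattened upper triangle, shape (21, 1)

recon = ut_dense(ut, offset)
print(np.allclose(recon[np.triu_indices(seq_len, offset)], ut))  # True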
Example #9
Source File: util.py    From Gun-Detector with Apache License 2.0
def pairwise_distances(feature, squared=True):
  """Computes the pairwise distance matrix in numpy.

  Args:
    feature: 2-D numpy array of size [number of data, feature dimension]
    squared: Boolean. If true, output is the pairwise squared euclidean
      distance matrix; else, output is the pairwise euclidean distance matrix.

  Returns:
    pdists: 2-D numpy array of size
      [number of data, number of data].
  """
  triu = np.triu_indices(feature.shape[0], 1)
  upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
  if squared:
    upper_tri_pdists **= 2.
  num_data = feature.shape[0]
  pdists = np.zeros((num_data, num_data))
  pdists[np.triu_indices(num_data, 1)] = upper_tri_pdists
  # Make symmetrical.
  pdists = pdists + pdists.T - np.diag(
      pdists.diagonal())
  return pdists 
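This construction should match SciPy's condensed-distance helpers, which use the same row-major upper-triangular ordering; a hedged equivalence check, assuming SciPy is available:

import numpy as np
from scipy.spatial.distance import pdist, squareform

feature = np.random.rand(5, 3)
print(np.allclose(pairwise_distances(feature, squared=False),
                  squareform(pdist(feature))))  # True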
Example #10
Source File: metric_learning_test.py    From tf-slim with Apache License 2.0
def pairwise_distance_np(feature, squared=False):
  """Computes the pairwise distance matrix in numpy.

  Args:
    feature: 2-D numpy array of size [number of data, feature dimension]
    squared: Boolean. If true, output is the pairwise squared euclidean
      distance matrix; else, output is the pairwise euclidean distance matrix.

  Returns:
    pairwise_distances: 2-D numpy array of size
      [number of data, number of data].
  """
  triu = np.triu_indices(feature.shape[0], 1)
  upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
  if squared:
    upper_tri_pdists **= 2.
  num_data = feature.shape[0]
  pairwise_distances = np.zeros((num_data, num_data))
  pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
  # Make symmetrical.
  pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
      pairwise_distances.diagonal())
  return pairwise_distances 
Example #11
Source File: test_utils.py    From mne-features with BSD 3-Clause "New" or "Revised" License
def test_idxiter():
    n_channels = data.shape[0]
    # Upper-triangular part, including diag
    idx0, idx1 = np.triu_indices(n_channels)
    triu_indices = np.array([np.arange(idx0.size), idx0, idx1])
    triu_indices2 = np.array(list(_idxiter(n_channels, include_diag=True)))
    # Upper-triangular part, without diag
    idx2, idx3 = np.triu_indices(n_channels, 1)
    triu_indices_nodiag = np.array([np.arange(idx2.size), idx2, idx3])
    triu_indices2_nodiag = np.array(list(_idxiter(n_channels,
                                                  include_diag=False)))
    assert_almost_equal(triu_indices, triu_indices2.transpose())
    assert_almost_equal(triu_indices_nodiag, triu_indices2_nodiag.transpose())
    # Upper and lower-triangular parts, without diag
    expected = [(i, j) for _, (i, j) in
                enumerate(np.ndindex((n_channels, n_channels))) if i != j]
    assert_equal(np.array([(i, j) for _, i, j in _idxiter(n_channels,
                                                          triu=False)]),
                 expected) 
Example #12
Source File: util.py    From yolo_v2 with Apache License 2.0
def pairwise_distances(feature, squared=True):
  """Computes the pairwise distance matrix in numpy.

  Args:
    feature: 2-D numpy array of size [number of data, feature dimension]
    squared: Boolean. If true, output is the pairwise squared euclidean
      distance matrix; else, output is the pairwise euclidean distance matrix.

  Returns:
    pdists: 2-D numpy array of size
      [number of data, number of data].
  """
  triu = np.triu_indices(feature.shape[0], 1)
  upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
  if squared:
    upper_tri_pdists **= 2.
  num_data = feature.shape[0]
  pdists = np.zeros((num_data, num_data))
  pdists[np.triu_indices(num_data, 1)] = upper_tri_pdists
  # Make symmetrical.
  pdists = pdists + pdists.T - np.diag(
      pdists.diagonal())
  return pdists 
Example #13
Source File: test_batch_hard_triplet_loss.py    From sentence-transformers with Apache License 2.0
def pairwise_distance_np(feature, squared=False):
    """Computes the pairwise distance matrix in numpy.
    Args:
        feature: 2-D numpy array of size [number of data, feature dimension]
        squared: Boolean. If true, output is the pairwise squared euclidean
                 distance matrix; else, output is the pairwise euclidean distance matrix.
    Returns:
        pairwise_distances: 2-D numpy array of size
                            [number of data, number of data].
    """
    triu = np.triu_indices(feature.shape[0], 1)
    upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
    if squared:
        upper_tri_pdists **= 2.
    num_data = feature.shape[0]
    pairwise_distances = np.zeros((num_data, num_data))
    pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
    # Make symmetrical.
    pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
        pairwise_distances.diagonal())
    return pairwise_distances 
Example #14
Source File: infer_labels.py    From TheCannon with MIT License
def _get_lvec(labels):
    """
    Constructs a label vector for an arbitrary number of labels
    Assumes that our model is quadratic in the labels

    Parameters
    ----------
    labels: numpy ndarray
        pivoted label values for one star

    Returns
    -------
    lvec: numpy ndarray
        label vector
    """
    nlabels = len(labels)
    # specialized to second-order model
    linear_terms = labels 
    quadratic_terms = np.outer(linear_terms, 
                               linear_terms)[np.triu_indices(nlabels)]
    lvec = np.hstack((linear_terms, quadratic_terms))
    return lvec 
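For labels (a, b) this produces [a, b, a², ab, b²], i.e. nlabels + nlabels * (nlabels + 1) / 2 terms, since np.triu_indices keeps each cross term exactly once. A quick check:

import numpy as np

lvec = _get_lvec(np.array([2.0, 3.0]))
print(lvec)  # [2. 3. 4. 6. 9.]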
Example #15
Source File: contrasts.py    From vnpy_crypto with MIT License
def _diff_contrast(self, levels):
        nlevels = len(levels)
        contr = np.zeros((nlevels, nlevels-1))
        int_range = np.arange(1, nlevels)
        upper_int = np.repeat(int_range, int_range)
        row_i, col_i = np.triu_indices(nlevels-1)
        # we want to iterate down the columns not across the rows
        # it would be nice if the index functions had a row/col order arg
        col_order = np.argsort(col_i)
        contr[row_i[col_order],
              col_i[col_order]] = (upper_int-nlevels)/float(nlevels)
        lower_int = np.repeat(int_range, int_range[::-1])
        row_i, col_i = np.tril_indices(nlevels-1)
        # we want to iterate down the columns not across the rows
        col_order = np.argsort(col_i)
        contr[row_i[col_order]+1, col_i[col_order]] = lower_int/float(nlevels)
        return contr 
Example #16
Source File: contrasts.py    From vnpy_crypto with MIT License
def _helmert_contrast(self, levels):
        n = len(levels)
        #http://www.ats.ucla.edu/stat/sas/webbooks/reg/chapter5/sasreg5.htm#HELMERT
        #contr = np.eye(n - 1)
        #int_range = np.arange(n - 1., 1, -1)
        #denom = np.repeat(int_range, np.arange(n - 2, 0, -1))
        #contr[np.tril_indices(n - 1, -1)] = -1. / denom

        #http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#HELMERT
        #contr = np.zeros((n - 1., n - 1))
        #int_range = np.arange(n, 1, -1)
        #denom = np.repeat(int_range[:-1], np.arange(n - 2, 0, -1))
        #contr[np.diag_indices(n - 1)] = (int_range - 1.) / int_range
        #contr[np.tril_indices(n - 1, -1)] = -1. / denom
        #contr = np.vstack((contr, -1./int_range))

        #r-like
        contr = np.zeros((n, n - 1))
        contr[1:][np.diag_indices(n - 1)] = np.arange(1, n)
        contr[np.triu_indices(n - 1)] = -1
        return contr 
Example #17
Source File: discrete_model.py    From vnpy_crypto with MIT License
def _hessian_geom(self, params):
        exog = self.exog
        y = self.endog[:,None]
        mu = self.predict(params)[:,None]

        # for dl/dparams dparams
        dim = exog.shape[1]
        hess_arr = np.empty((dim, dim))
        const_arr = mu*(1+y)/(mu+1)**2
        for i in range(dim):
            for j in range(dim):
                if j > i:
                    continue
                hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
                                       const_arr, axis=0)
        tri_idx = np.triu_indices(dim, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]
        return hess_arr 
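The final two lines are a common idiom for symmetric Hessians: fill only the lower triangle in the double loop (j <= i), then mirror it across the diagonal through the strict upper-triangular indices. A standalone sketch of the idiom:

import numpy as np

dim = 3
hess = np.tril(np.arange(1., 10.).reshape(dim, dim))  # lower triangle filled, upper zero

tri_idx = np.triu_indices(dim, k=1)
hess[tri_idx] = hess.T[tri_idx]   # copy the lower triangle into the upper one
print(np.allclose(hess, hess.T))  # True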
Example #18
Source File: energy.py    From pymoo with Apache License 2.0
def calc_potential_energy(A, d):
    i, j = anp.triu_indices(len(A), 1)
    D = anp.sqrt(squared_dist(A, A)[i, j])
    energy = anp.log((1 / D ** d).mean())
    return energy 
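This is the (log of the) Riesz s-energy summed over each unordered pair of points exactly once. A plain-NumPy equivalent, with the project's squared_dist helper replaced by a direct norm computation, might look like:

import numpy as np

def potential_energy_np(A, d):
    i, j = np.triu_indices(len(A), 1)
    dist = np.linalg.norm(A[i] - A[j], axis=1)  # each pairwise distance once
    return np.log((1.0 / dist ** d).mean())

A = np.random.rand(10, 2)
print(potential_energy_np(A, d=2))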
Example #19
Source File: metrics.py    From Probabilistic-Face-Embeddings with MIT License
def ROC_by_mat(score_mat, label_mat, thresholds=None, FARs=None, get_false_indices=False, triu_k=None):
    ''' Compute ROC using a pairwise score matrix and a corresponding label matrix.
        A wrapper of the ROC function.
    '''
    assert score_mat.ndim == 2
    assert score_mat.shape == label_mat.shape
    assert label_mat.dtype == bool  # np.bool alias was removed in NumPy 1.24; the builtin works for dtype comparison
    
    # Convert into vectors
    m,n  = score_mat.shape
    if triu_k is not None:
        assert m == n, "If using triu for ROC, the score matrix must be a square matrix!"
        triu_indices = np.triu_indices(m, triu_k)
        score_vec = score_mat[triu_indices]
        label_vec = label_mat[triu_indices]
    else:
        score_vec = score_mat.flatten()
        label_vec = label_mat.flatten()

    # Compute ROC
    if get_false_indices:
        TARs, FARs, thresholds, false_accept_indices, false_reject_indices = \
                    ROC(score_vec, label_vec, thresholds, FARs, True)
    else:
        TARs, FARs, thresholds = ROC(score_vec, label_vec, thresholds, FARs, False)

    # Convert false accept/reject indices into [row, col] indices
    if get_false_indices:
        rows, cols = np.meshgrid(np.arange(m), np.arange(n), indexing='ij')
        rc = np.stack([rows, cols], axis=2)
        if triu_k is not None:
            rc = rc[triu_indices]  # index the pair positions; yields shape [n_pairs, 2]
        else:
            rc = rc.reshape([-1,2])

        for i in range(len(FARs)):
            false_accept_indices[i] = rc[false_accept_indices[i]]
            false_reject_indices[i] = rc[false_reject_indices[i]]
        return TARs, FARs, thresholds, false_accept_indices, false_reject_indices
    else:
        return TARs, FARs, thresholds 
Example #20
Source File: lifted_test.py    From addons with Apache License 2.0
def pairwise_distance_np(feature, squared=False):
    """Computes the pairwise distance matrix in numpy.

    Args:
      feature: 2-D numpy array of size [number of data, feature dimension]
      squared: Boolean. If true, output is the pairwise squared euclidean
        distance matrix; else, output is the pairwise euclidean distance
        matrix.

    Returns:
      pairwise_distances: 2-D numpy array of size
        [number of data, number of data].
    """
    triu = np.triu_indices(feature.shape[0], 1)
    upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
    if squared:
        upper_tri_pdists **= 2.0
    num_data = feature.shape[0]
    pairwise_distances = np.zeros((num_data, num_data))
    pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
    # Make symmetrical.
    pairwise_distances = (
        pairwise_distances
        + pairwise_distances.T
        - np.diag(pairwise_distances.diagonal())
    )
    return pairwise_distances 
Example #21
Source File: matutils.py    From topical_word_embeddings with MIT License
def triu_indices(n, k=0):
        m = numpy.ones((n, n), int)
        a = triu(m, k)
        return numpy.where(a != 0) 
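This is a backward-compatibility shim for NumPy versions older than 1.4, which introduced numpy.triu_indices; in the source it is presumably only defined when the import fails. A quick equivalence check against the builtin on a modern NumPy:

import numpy
from numpy import triu

m = numpy.ones((4, 4), int)
legacy = numpy.where(triu(m, 1) != 0)
modern = numpy.triu_indices(4, 1)
print(all(numpy.array_equal(a, b) for a, b in zip(legacy, modern)))  # True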
Example #22
Source File: test_stats.py    From conpy with BSD 3-Clause "New" or "Revised" License
def test_clustering():
    """Test basic functionality"""
    src = _load_src()
    n_verts = 40
    max_spread = 0.02
    vertices = [src[0]['vertno'][:n_verts], src[1]['vertno'][:n_verts]]
    grid_points = np.vstack([s['rr'][v] for s, v in zip(src, vertices)])

    # Create one-to-all pairs in left hemisphere with pairwise distance
    # below max_spread
    dist_lh = cdist(grid_points[:n_verts], grid_points[:n_verts])
    from_lh, to_lh = np.where(dist_lh < max_spread)
    counts = np.bincount(from_lh)
    min_size = counts.max()
    chosen_inds = (from_lh == np.argmax(counts))
    pairs = np.array([from_lh[chosen_inds], to_lh[chosen_inds]])

    # Add some random connections with large enough distances
    # that they should not survive
    dist = pdist(grid_points)
    pairs_rand = np.array(np.triu_indices(2*len(vertices[0]), k=1))
    indices = rand_gen.choice(np.where(dist > 2*max_spread + 0.01)[0], 20)
    pairs = np.hstack((pairs, pairs_rand[:, indices]))

    # Make random contrast
    contrast = _make_random_connectivity(pairs, sigma=0.6, vertices=vertices)
    # Check that the resulting connectivity makes sense
    contrast_thresh = cluster_threshold(contrast, src, min_size=min_size,
                                        max_spread=max_spread)
    assert contrast_thresh.n_connections == min_size
    assert contrast_thresh.n_sources == contrast.n_sources
    assert_array_equal(contrast_thresh.pairs, contrast.pairs[:, :min_size]) 
Example #23
Source File: DistanceUtils.py    From RaptorX-Contact with GNU General Public License v3.0
def CalcLabelProb(data=None, numLabels=26, eps=np.finfo(np.float32).eps):

    freqs = []
    for separation in config.RangeBoundaries:
        #print('separation=', separation)
        freq = []
        for m in data:
            index = np.triu_indices(m.shape[0], separation)
            values = m[index]
            res = np.bincount(values, minlength=numLabels)
            freq.append(res)
        freqs.append(np.sum(freq, axis=0))

    count = np.array(freqs)
    #print(count.shape)
    #print(count)

    ## the order of subtraction cannot be changed
    ## count[0], [1], [2], [3], [4] are for extra long-, long-, medium-, short- and near-range residue pairs, respectively
    for i in range(count.shape[0] - 1, 0, -1):
        count[i] -= count[i - 1]

    frequency = [c / (eps + np.sum(c)) for c in count]
    return np.array(frequency)


## calculate label distribution from a list of distance matrices
## when invalidDistanceSeparated is True, it means that the invalid distance (represented by -1) is treated as an independent distance bin 
Example #24
Source File: sparse_gp_theano_internal.py    From icml18-jtnn with MIT License
def initialize(self):

        input_means = np.array(theano.function([], self.input_means)())

        assert input_means.shape[ 0 ] >= self.n_inducing_points

        selected_points = np.random.choice(input_means.shape[ 0 ], self.n_inducing_points, replace = False)
        z = input_means[ selected_points, : ]

        # If we are not in the first layer, we initialize the length scales to one

        lls = np.zeros(input_means.shape[ 1 ])

        M = np.outer(np.sum(input_means**2, 1), np.ones(input_means.shape[ 0 ]))
        dist = M - 2 * np.dot(input_means, input_means.T) + M.T
        lls = np.log(0.5 * (np.median(dist[ np.triu_indices(input_means.shape[ 0 ], 1) ]) + 1e-3)) * np.ones(input_means.shape[ 1 ])
        
        self.lls.set_value(lls.astype(theano.config.floatX))
        self.z.set_value(z.astype(theano.config.floatX))
        self.lsf.set_value(np.zeros(1).astype(theano.config.floatX)[ 0 ])

        # We initialize the cavity and the posterior approximation to the prior but with a small random
        # mean so that the outputs are not equal to zero (otherwise the output of the gp will be zero and
        # the next layer will be initialized improperly).

        # If we are not in the first layer, we reduce the variance of the L and m

        L = np.random.normal(size = (self.n_inducing_points, self.n_inducing_points)) * 1.0
        m = self.training_targets.get_value()[ selected_points, : ]

        self.LParamPost.set_value(L.astype(theano.config.floatX))
        self.mParamPost.set_value(m.astype(theano.config.floatX))

    # This sets the node for prediction. It basically switches the cavity distribution to be the posterior approximation
    # Once set in this state the network cannot be trained any more. 
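The lls initialization above is the familiar median heuristic: the log-lengthscale is set from the median pairwise squared distance between inputs, using the upper triangle so each pair is counted once. A plain-NumPy sketch of the same computation:

import numpy as np

X = np.random.rand(20, 3)
sq_norms = np.sum(X**2, axis=1)
dist = sq_norms[:, None] - 2 * X @ X.T + sq_norms[None, :]   # squared distances
median_sq = np.median(dist[np.triu_indices(len(X), 1)])
lls = np.log(0.5 * (median_sq + 1e-3)) * np.ones(X.shape[1])
print(lls.shape)  # (3,)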
Example #25
Source File: clustering.py    From pycircstat with MIT License
def train(self, alpha):
        """
        Finds the agglomerative clustering on the data alpha
        :param alpha: angles in radians
        :returns: data, cluster ids

        """
        assert len(alpha.shape) == 1, 'Clustering works only for 1d data'
        n = len(alpha)
        cid = np.arange(n, dtype=int)

        nu = n


        while nu > self.numclust:
            mu = np.asarray([descr.mean(alpha[cid == j]) if j in cid else np.inf for j in range(n)])
            D = np.abs(descr.pairwise_cdiff(mu))
            idx = np.triu_indices(n, 1)
            min_idx = np.nanargmin(D[idx])  # closest pair of cluster means
            cid[cid == cid[idx[0][min_idx]]] = cid[idx[1][min_idx]]
            nu -= 1


        cid2 = np.empty_like(cid)
        for i,j in enumerate(np.unique(cid)):
            cid2[cid == j] = i
        ucid = np.unique(cid2)
        self.centroids = np.asarray([descr.mean(alpha[cid2 == i]) for i in ucid])
        self.cluster_ids = ucid
        self.r = np.asarray([descr.resultant_vector_length(alpha[cid2 == i]) for i in ucid])

        return alpha, cid2 
Example #26
Source File: control_.py    From kusanagi with MIT License
def init_params(self):
        H_steps = int(np.ceil(self.H/self.dt))
        self.state_changed = False

        # set random (uniform distribution) controls
        u = self.maxU*(2*np.random.random((H_steps, len(self.maxU))) - 1)
        self.u_nominal = theano.shared(u)

        # intialize the nominal states to the appropriate size
        m0, S0 = utils.gTrig2_np(np.array(self.m0)[None, :],
                                 np.array(self.S0)[None, :, :],
                                 self.angle_dims,
                                 len(self.m0))
        self.triu_indices = np.triu_indices(m0.size)
        z0 = np.concatenate([m0.flatten(), S0[0][self.triu_indices]])
        z = np.tile(z0, (H_steps, 1))
        self.z_nominal = theano.shared(z)

        # initialize the open loop and feedback matrices
        I_ = np.zeros((H_steps, len(self.maxU)))
        L_ = np.zeros((H_steps, len(self.maxU), z0.size))
        self.I_ = theano.shared(I_)
        self.L_ = theano.shared(L_)

        # set a meaningful filename
        self.filename = self.name+'_'+str(len(self.m0))+'_'+str(len(self.maxU)) 
Example #27
Source File: pddp.py    From kusanagi with MIT License
def wrap_belief(mx, Sx, triu_indices):
    z_next = tt.concatenate([mx.flatten(), Sx[triu_indices]])
    return z_next 
Example #28
Source File: count_model.py    From vnpy_crypto with MIT License
def hessian(self, params):
        """
        Generic Zero Inflated model Hessian matrix of the loglikelihood

        Parameters
        ----------
        params : array-like
            The parameters of the model

        Returns
        -------
        hess : ndarray, (k_vars, k_vars)
            The Hessian, second derivative of loglikelihood function,
            evaluated at `params`

        Notes
        -----
        """
        hess_arr_main = self._hessian_main(params)
        hess_arr_infl = self._hessian_inflate(params)

        if hess_arr_main is None or hess_arr_infl is None:
            return approx_hess(params, self.loglike)

        dim = self.k_exog + self.k_inflate

        hess_arr = np.zeros((dim, dim))

        hess_arr[:self.k_inflate,:] = hess_arr_infl
        hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main

        tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
        hess_arr[tri_idx] = hess_arr.T[tri_idx]

        return hess_arr 
Example #29
Source File: pddp.py    From kusanagi with MIT License
def unwrap_belief(z, D):
    mx, Sx_triu = z[:D], z[D:]
    Sx = tt.zeros((D, D))
    triu_indices = np.triu_indices(D)
    Sx = tt.set_subtensor(Sx[triu_indices], Sx_triu)
    Sx = Sx + Sx.T - tt.diag(tt.diag(Sx))
    return mx, Sx, triu_indices
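Together with wrap_belief from the earlier pddp.py example, this implements the pack/unpack round trip used throughout this control code: a symmetric covariance is flattened to its upper triangle and restored by mirroring. A plain-NumPy sketch of the same round trip:

import numpy as np

D = 3
S = np.cov(np.random.rand(D, 10))           # symmetric covariance matrix
triu = np.triu_indices(D)

z = np.concatenate([np.zeros(D), S[triu]])  # pack: mean followed by upper triangle
Sx = np.zeros((D, D))
Sx[triu] = z[D:]                            # unpack the upper triangle
Sx = Sx + Sx.T - np.diag(np.diag(Sx))       # mirror without doubling the diagonal
print(np.allclose(Sx, S))                   # True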