Python numpy.infty() Examples

The following are 30 code examples for showing how to use numpy.infty(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module numpy, or try the search function.

Example 1
Project: TransferRL   Author: yaserkl   File: beam_search.py    License: MIT License 6 votes vote down vote up
def extend(self, token, log_prob, state, decoder_output, encoder_mask, attn_dist, p_gen, coverage):
    """Return a NEW hypothesis, extended with the information from the latest step of beam search.

    Args:
      token: Integer. Latest token produced by beam search.
      log_prob: Float. Log prob of the latest token.
      state: Current decoder state, a LSTMStateTuple.
      decoder_output: Decoder output from the latest step (appended to the running list), or None.
      encoder_mask: Encoder mask for the latest step (appended to the running list), or None.
      attn_dist: Attention distribution from latest step. Numpy array shape (attn_length).
      p_gen: Generation probability on latest step. Float.
      coverage: Latest coverage vector. Numpy array shape (attn_length), or None if not using coverage.
    Returns:
      New Hypothesis for next step.
    """
    # Block repeated trigrams by forcing this path's score to -inf so beam
    # search never selects it.  np.inf replaces np.infty, which was removed
    # in NumPy 2.0 (both named the same float object).
    if FLAGS.avoid_trigrams and self._has_trigram(self.tokens + [token]):
        log_prob = -np.inf
    return Hypothesis(tokens=self.tokens + [token],
                      log_probs=self.log_probs + [log_prob],
                      state=state,
                      decoder_output=self.decoder_output + [decoder_output] if decoder_output is not None else [],
                      encoder_mask=self.encoder_mask + [encoder_mask] if encoder_mask is not None else [],
                      attn_dists=self.attn_dists + [attn_dist],
                      p_gens=self.p_gens + [p_gen],
                      coverage=coverage)
Example 2
Project: sprocket   Author: k2kobayashi   File: diagGMM.py    License: MIT License 6 votes vote down vote up
def fit(self, X):
        """Run EM iterations to fit the GMM parameters to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Notes
        -----
        NOTE(review): ``back_lower_bound`` is saved each iteration but never
        compared against ``lower_bound``, so no early stopping happens and
        all ``self.n_iter`` iterations always run.  Kept as-is to preserve
        behavior -- confirm whether a convergence break was intended.
        """
        # initialize model parameters from the data
        self._initialize_parameters(X, self.random_state)
        # np.inf replaces np.infty (removed in NumPy 2.0)
        lower_bound = -np.inf

        for n in range(self.n_iter):
            # E-step: per-sample log-likelihood and posterior responsibilities
            log_prob_norm, log_resp = self._e_step(X)

            # M-step: re-estimate parameters from the responsibilities
            self._m_step(X, log_resp)

            # check convergence (see NOTE above: the result is currently unused)
            back_lower_bound = lower_bound
            lower_bound = self._compute_lower_bound(
                log_resp, log_prob_norm)
Example 3
Project: RLSeq2Seq   Author: yaserkl   File: beam_search.py    License: MIT License 6 votes vote down vote up
def extend(self, token, log_prob, state, decoder_output, encoder_mask, attn_dist, p_gen, coverage):
    """Return a NEW hypothesis, extended with the information from the latest step of beam search.

    Args:
      token: Integer. Latest token produced by beam search.
      log_prob: Float. Log prob of the latest token.
      state: Current decoder state, a LSTMStateTuple.
      decoder_output: Decoder output from the latest step (appended to the running list), or None.
      encoder_mask: Encoder mask for the latest step (appended to the running list), or None.
      attn_dist: Attention distribution from latest step. Numpy array shape (attn_length).
      p_gen: Generation probability on latest step. Float.
      coverage: Latest coverage vector. Numpy array shape (attn_length), or None if not using coverage.
    Returns:
      New Hypothesis for next step.
    """
    # Block repeated trigrams by forcing this path's score to -inf so beam
    # search never selects it.  np.inf replaces np.infty, which was removed
    # in NumPy 2.0 (both named the same float object).
    if FLAGS.avoid_trigrams and self._has_trigram(self.tokens + [token]):
        log_prob = -np.inf
    return Hypothesis(tokens=self.tokens + [token],
                      log_probs=self.log_probs + [log_prob],
                      state=state,
                      decoder_output=self.decoder_output + [decoder_output] if decoder_output is not None else [],
                      encoder_mask=self.encoder_mask + [encoder_mask] if encoder_mask is not None else [],
                      attn_dists=self.attn_dists + [attn_dist],
                      p_gens=self.p_gens + [p_gen],
                      coverage=coverage)
Example 4
Project: POT   Author: PythonOT   File: da.py    License: MIT License 6 votes vote down vote up
def __init__(self, reg_e=1., reg_cl=0.1,
                 max_iter=10, max_inner_iter=200, log=False,
                 tol=10e-9, verbose=False,
                 metric="sqeuclidean", norm=None,
                 distribution_estimation=distribution_estimation_uniform,
                 out_of_sample_map='ferradans', limit_max=np.inf):
        """Store the transport-problem hyperparameters on the estimator.

        reg_e / reg_cl are the entropic and class-label regularization
        weights; the remaining arguments control the (inner) iteration
        counts, tolerance, cost metric/normalization, and out-of-sample
        mapping.  ``limit_max`` defaults to np.inf, which replaces np.infty
        (removed in NumPy 2.0; both named the same float).
        """
        self.reg_e = reg_e
        self.reg_cl = reg_cl
        self.max_iter = max_iter
        self.max_inner_iter = max_inner_iter
        self.tol = tol
        self.log = log
        self.verbose = verbose
        self.metric = metric
        self.norm = norm
        self.distribution_estimation = distribution_estimation
        self.out_of_sample_map = out_of_sample_map
        self.limit_max = limit_max
Example 5
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_bayesian_mixture.py    License: MIT License 6 votes vote down vote up
def test_monotonic_likelihood():
    # Each step of variational inference without regularization must improve
    # the training lower bound monotonically.
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=20)
    n_components = rand_data.n_components

    for prior_type in PRIOR_TYPE:
        for covar_type in COVARIANCE_TYPE:
            X = rand_data.X[covar_type]
            bgmm = BayesianGaussianMixture(
                weight_concentration_prior_type=prior_type,
                n_components=2 * n_components, covariance_type=covar_type,
                warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
            # np.inf replaces np.infty (removed in NumPy 2.0)
            current_lower_bound = -np.inf
            # Do one training iteration at a time so we can make sure that the
            # training log likelihood increases after each iteration.
            for _ in range(600):
                prev_lower_bound = current_lower_bound
                current_lower_bound = bgmm.fit(X).lower_bound_
                assert_greater_equal(current_lower_bound, prev_lower_bound)

                if bgmm.converged_:
                    break
            assert bgmm.converged_
Example 6
Project: platform-resource-manager   Author: intel   File: gmmfense.py    License: Apache License 2.0 6 votes vote down vote up
def __init__(self, data, max_mixture=10, threshold=0.1):
        """
        Class constructor, arguments include:
            data - data to build GMM model
            max_mixture - max number of Gaussian mixtures to try
            threshold - probability threshold to determine fence
        """
        self.data = data
        self.thresh = threshold
        # np.inf replaces np.infty (removed in NumPy 2.0)
        lowest_bic = np.inf
        components = 1
        bic = []
        n_components_range = range(1, max_mixture + 1)
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM; fixed seed keeps model selection reproducible
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          random_state=1005)
            gmm.fit(data)
            bic.append(gmm.bic(data))
            # BIC: lower is better.  The first iteration always wins because
            # lowest_bic starts at +inf, so best_gmm is always bound.
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
                components = n_components
        log.debug('best gmm components number: %d, bic %f ', components, lowest_bic)
        self.gmm = best_gmm
Example 7
Project: platform-resource-manager   Author: intel   File: gmmfense.py    License: Apache License 2.0 6 votes vote down vote up
def __init__(self, data, max_mixture=10, threshold=0.1):
        """
        Class constructor, arguments include:
            data - data to build GMM model
            max_mixture - max number of Gaussian mixtures to try
            threshold - probability threshold to determine fence
        """
        self.data = data
        self.thresh = threshold
        # np.inf replaces np.infty (removed in NumPy 2.0)
        lowest_bic = np.inf
        components = 1
        bic = []
        n_components_range = range(1, max_mixture + 1)
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM; fixed seed keeps model selection reproducible
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          random_state=1005)
            gmm.fit(data)
            bic.append(gmm.bic(data))
            # BIC: lower is better.  The first iteration always wins because
            # lowest_bic starts at +inf, so best_gmm is always bound.
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
                components = n_components
        log.debug('best gmm components number: %d, bic %f ', components, lowest_bic)
        self.gmm = best_gmm
Example 8
Project: chainer   Author: chainer   File: test_constants.py    License: MIT License 6 votes vote down vote up
def test_constants():
    # chainerx re-exports numpy's module-level constants; each pair must be
    # the very same object (identity, not mere equality).
    names = ('Inf', 'Infinity', 'NAN', 'NINF', 'NZERO', 'NaN', 'PINF',
             'PZERO', 'e', 'euler_gamma', 'inf', 'infty', 'nan', 'newaxis',
             'pi')
    for name in names:
        assert getattr(chainerx, name) is getattr(numpy, name)
Example 9
Project: opensurfaces   Author: seanbell   File: intrinsic.py    License: MIT License 6 votes vote down vote up
def weiss_retinex(image, multi_images, mask, threshold, L1=False):
    """Estimate reflectance and shading from multiple images of one scene.

    Median gradients across the image stack are thresholded and integrated
    (Poisson solve) into a log-reflectance map; shading is image / refl.

    Returns:
      (shading, refl) arrays, both zeroed outside ``mask``.
    """
    # Clamp intensities away from zero before taking logs.
    # np.inf replaces np.infty (removed in NumPy 2.0).
    multi_images = np.clip(multi_images, 3., np.inf)
    log_multi_images = np.log(multi_images)

    # Per-image gradients; median over the stack suppresses shading changes.
    i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
    r_y = np.median(i_y_all, axis=2)
    r_x = np.median(i_x_all, axis=2)

    # Keep only strong gradients (assumed to be reflectance edges).
    r_y *= (np.abs(r_y) > threshold)
    r_x *= (np.abs(r_x) > threshold)
    if L1:
        log_refl = poisson.solve_L1(r_y, r_x, mask)
    else:
        log_refl = poisson.solve(r_y, r_x, mask)
    refl = np.where(mask, np.exp(log_refl), 0.)
    shading = np.where(mask, image / refl, 0.)

    return shading, refl





#################### Wrapper classes for experiments ########################### 
Example 10
Project: mpnum   Author: dsuess   File: _testing.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def assert_mpa_identical(mpa1, mpa2, decimal=np.inf):
    """Verify that two MPAs are completely identical.

    :param decimal: precision for almost-equal comparison of the local
        tensors; the default np.inf requests exact equality.  (np.inf
        replaces np.infty, removed in NumPy 2.0 -- both named the same
        float object, so the ``is`` sentinel check below still holds.)
    """
    assert len(mpa1) == len(mpa2)
    assert mpa1.canonical_form == mpa2.canonical_form
    assert mpa1.dtype == mpa2.dtype

    for i, lten1, lten2 in zip(it.count(), mpa1.lt, mpa2.lt):
        if decimal is np.inf:
            assert_array_equal(lten1, lten2,
                               err_msg='mismatch in lten {}'.format(i))
        else:
            assert_array_almost_equal(lten1, lten2, decimal=decimal,
                                      err_msg='mismatch in lten {}'.format(i))
    # TODO: We should make a comprehensive comparison between `mpa1`
    # and `mpa2`.  Are we missing other things?
Example 11
Project: B-SOID   Author: YttriLab   File: train.py    License: GNU General Public License v3.0 6 votes vote down vote up
def bsoid_hdbscan(umap_embeddings, hdbscan_params=HDBSCAN_PARAMS):
    """
    Trains HDBSCAN (unsupervised) given learned UMAP space
    :param umap_embeddings: 2D array, embedded UMAP space
    :param hdbscan_params: dict, HDBSCAN params in GLOBAL_CONFIG
    :return assignments: HDBSCAN assignments (plus soft cluster memberships
        and the argmax soft assignments)
    """
    # np.inf replaces np.infty (removed in NumPy 2.0); starting at -inf
    # guarantees best_clf is bound on the first iteration.
    highest_numulab = -np.inf
    numulab = []
    min_cluster_range = range(6, 21)
    logging.info('Running HDBSCAN on {} instances in {} D space...'.format(*umap_embeddings.shape))
    for min_c in min_cluster_range:
        # min_cluster_size scales with both the sweep index and the data size
        trained_classifier = hdbscan.HDBSCAN(prediction_data=True,
                                             min_cluster_size=int(round(0.001 * min_c * umap_embeddings.shape[0])),
                                             **hdbscan_params).fit(umap_embeddings)
        numulab.append(len(np.unique(trained_classifier.labels_)))
        # keep the classifier that produced the most distinct labels
        if numulab[-1] > highest_numulab:
            logging.info('Adjusting minimum cluster size to maximize cluster number...')
            highest_numulab = numulab[-1]
            best_clf = trained_classifier
    assignments = best_clf.labels_
    soft_clusters = hdbscan.all_points_membership_vectors(best_clf)
    soft_assignments = np.argmax(soft_clusters, axis=1)
    logging.info('Done predicting labels for {} instances in {} D space...'.format(*umap_embeddings.shape))
    return assignments, soft_clusters, soft_assignments
Example 12
Project: cupy   Author: cupy   File: gmm.py    License: MIT License 6 votes vote down vote up
def train_gmm(X, max_iter, tol, means, covariances):
    """Fit a two-component diagonal GMM to X with EM (numpy or cupy arrays).

    Iterates E/M steps until the per-sample log-likelihood changes by less
    than ``tol`` or ``max_iter`` is reached.
    Returns (inv_cov, means, weights, covariances).
    """
    xp = cupy.get_array_module(X)
    # np.inf replaces np.infty (removed in NumPy 2.0)
    lower_bound = -np.inf
    converged = False
    weights = xp.array([0.5, 0.5], dtype=np.float32)
    inv_cov = 1 / xp.sqrt(covariances)

    for n_iter in range(max_iter):
        prev_lower_bound = lower_bound
        log_prob_norm, log_resp = e_step(X, inv_cov, means, weights)
        weights, means, covariances = m_step(X, xp.exp(log_resp))
        inv_cov = 1 / xp.sqrt(covariances)
        lower_bound = log_prob_norm
        change = lower_bound - prev_lower_bound
        if abs(change) < tol:
            converged = True
            break

    if not converged:
        print('Failed to converge. Increase max-iter or tol.')

    return inv_cov, means, weights, covariances
Example 13
Project: pySDC   Author: Parallel-in-Time   File: test_imexsweeper.py    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
def test_sweepequalmatrix(self):
    """A single node-to-node SDC sweep must equal the matrix formulation LHS^-1 (u0 + RHS u0)."""
    # avoid shadowing the builtin `type` used by the original loop variable
    for coll_class in classes:
      self.swparams['collocation_class'] = coll_class
      step, level, problem, nnodes = self.setupLevelStepProblem()
      step.levels[0].sweep.predict()
      u0full = np.array([level.u[l].values.flatten() for l in range(1, nnodes + 1)])

      # Perform node-to-node SDC sweep
      level.sweep.update_nodes()

      lambdas = [problem.params.lambda_f[0], problem.params.lambda_s[0]]
      LHS, RHS = level.sweep.get_scalar_problems_sweeper_mats(lambdas=lambdas)

      # Matrix formulation of one sweep
      unew = np.linalg.inv(LHS).dot(u0full + RHS.dot(u0full))
      usweep = np.array([level.u[l].values.flatten() for l in range(1, nnodes + 1)])
      # max-norm comparison; np.inf replaces np.infty (removed in NumPy 2.0)
      assert np.linalg.norm(unew - usweep, np.inf) < 1e-14, "Single SDC sweeps in matrix and node-to-node formulation yield different results"

  #
  # Make sure the implemented update formula matches the matrix update formula
  # 
Example 14
Project: pySDC   Author: Parallel-in-Time   File: test_imexsweeper.py    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
def test_updateformula(self):
    """Sweeper end-value update must match the quadrature/matrix formulation."""
    for coll_class in classes:
      self.swparams['collocation_class'] = coll_class
      step, level, problem, nnodes = self.setupLevelStepProblem()
      level.sweep.predict()
      u0full = np.array([level.u[l].values.flatten() for l in range(1, nnodes + 1)])

      # Perform update step in sweeper
      level.sweep.update_nodes()
      ustages = np.array([level.u[l].values.flatten() for l in range(1, nnodes + 1)])
      # Compute end value through provided function
      level.sweep.compute_end_point()
      uend_sweep = level.uend.values
      # Compute end value from matrix formulation: u0 + dt * w^T (lambda * u)
      if level.sweep.params.do_coll_update:
        uend_mat = self.pparams['u0'] + step.dt * level.sweep.coll.weights.dot(ustages * (problem.params.lambda_s[0] + problem.params.lambda_f[0]))
      else:
        uend_mat = ustages[-1]
      # max-norm comparison; np.inf replaces np.infty (removed in NumPy 2.0)
      assert np.linalg.norm(uend_sweep - uend_mat, np.inf) < 1e-14, "Update formula in sweeper gives different result than matrix update formula"


  #
  # Compute the exact collocation solution by matrix inversion and make sure it is a fixed point
  # 
Example 15
Project: pySDC   Author: Parallel-in-Time   File: test_imexsweeper.py    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
def test_updateformula_no_coll_update(self):
    """With do_coll_update=False the end value must equal the last stage, i.e. q = (0,...,0,1)."""
    for coll_class in classes:
      self.swparams['collocation_class'] = coll_class
      self.swparams['do_coll_update'] = False
      step, level, problem, nnodes = self.setupLevelStepProblem()
      # if type of nodes does not have right endpoint as quadrature node, cannot set do_coll_update to False and perform this test
      if not level.sweep.coll.right_is_node: break
      level.sweep.predict()
      u0full = np.array([level.u[l].values.flatten() for l in range(1, nnodes + 1)])

      # Perform update step in sweeper
      level.sweep.update_nodes()
      ustages = np.array([level.u[l].values.flatten() for l in range(1, nnodes + 1)])

      # Compute end value through provided function
      level.sweep.compute_end_point()
      uend_sweep = level.uend.values
      # Compute end value from matrix formulation with q = (0,...,0,1)
      q = np.zeros(nnodes)
      q[nnodes - 1] = 1.0
      uend_mat = q.dot(ustages)
      # max-norm comparison; np.inf replaces np.infty (removed in NumPy 2.0)
      assert np.linalg.norm(uend_sweep - uend_mat, np.inf) < 1e-14, "For do_coll_update=False, update formula in sweeper gives different result than matrix update formula with q=(0,..,0,1)"
Example 16
Project: ALiPy   Author: NUAA-AL   File: cost_sensitive.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def cal_Informativeness(self, label_index, unlabel_index, models):
        """Compute the informativeness of every (sample, class) pair.

        Returns:
        Info : 2d array-like
        shape [n_unlabel_samples, n_classes]
        Informativeness of each unlabeled sample; labeled entries are set
        to -inf so they are never selected.
        """
        Infor = np.zeros((self.n_samples, self.n_classes))
        Uncertainty = self.cal_uncertainty(label_index, unlabel_index, models)
        label_mat = label_index.get_matrix_mask((self.n_samples, self.n_classes), sparse=False)
        unlabel_mat = unlabel_index.get_matrix_mask((self.n_samples, self.n_classes), sparse=False)
        for j in np.arange(self.n_classes):
            j_unlabel = np.where(unlabel_mat[:, j] == 1)[0]
            j_label = np.where(unlabel_mat[:, j] != 1)[0]
            for i in j_unlabel:
                flag = self.cal_relevance(i, j, label_index, models, k=5)
                if flag == 1:
                    Infor[i][j] = Uncertainty[i][j] * 2
                elif flag == -1:
                    Infor[i][j] = Uncertainty[i][j] + self.cal_Udes(i, j, Uncertainty)
            # BUG FIX: was Infor[j_label][j] = -np.infty -- chained fancy
            # indexing writes into a COPY, so the assignment was a no-op.
            # Index rows and column together instead.  np.inf replaces
            # np.infty (removed in NumPy 2.0).
            Infor[j_label, j] = -np.inf
        return Infor
Example 17
Project: ALiPy   Author: NUAA-AL   File: cost_sensitive.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def cal_uncertainty(self, target, models):
        """Calculate per-(sample, class) uncertainty as |1 / decision_value|.

        target: unlabel matrix -- entries equal to 1 mark unlabeled pairs.
        models: one binary classifier per class (must expose decision_function).
        Labeled entries are set to -inf so they are never queried.
        """
        Uncertainty = np.zeros([self.n_samples, self.n_classes])
        for j in np.arange(self.n_classes):
            model = models[j]
            j_target = target[:, j]
            j_label = np.where(j_target != 1)
            j_unlabel = np.where(j_target == 1)
            for i in j_unlabel[0]:
                # closer to the decision boundary => larger uncertainty
                d_v = model.decision_function([self.X[i]])
                Uncertainty[i][j] = np.abs(1 / d_v)
            # np.inf replaces np.infty (removed in NumPy 2.0)
            Uncertainty[j_label, j] = -np.inf
        return Uncertainty
Example 18
Project: sparsereg   Author: Ohjeah   File: __init__.py    License: MIT License 6 votes vote down vote up
def crowding_distance(models, *attrs):
    """Crowding distance of each model's fitness vector (NSGA-II style).

    Assumes models are lexicographically sorted.
    """
    get_fit = _get_fit(models, attrs)

    f = np.array(sorted([get_fit(m) for m in models]))

    # normalize every objective by its observed range
    scale = np.max(f, axis=0) - np.min(f, axis=0)

    with np.errstate(invalid="ignore"):
        dist = np.sum(abs(np.roll(f, 1, axis=0) - np.roll(f, -1, axis=0)) / scale, axis=1)
    # boundary solutions are always preserved: infinite distance.
    # np.inf replaces np.infty (removed in NumPy 2.0).
    dist[0] = np.inf
    dist[-1] = np.inf
    return dist
Example 19
Project: semisup-learn   Author: tmadl   File: CPLELearning.py    License: MIT License 6 votes vote down vote up
def __init__(self, basemodel, pessimistic=True, predict_from_probabilities = False, use_sample_weighting = True, max_iter=3000, verbose = 1):
        """Configure CPLE semi-supervised learning around ``basemodel``."""
        self.model = basemodel
        self.pessimistic = pessimistic
        self.predict_from_probabilities = predict_from_probabilities
        self.use_sample_weighting = use_sample_weighting
        self.max_iter = max_iter
        self.verbose = verbose

        self.it = 0  # iteration counter
        self.noimprovementsince = 0  # log likelihood hasn't improved since this number of iterations
        self.maxnoimprovementsince = 3  # threshold for iterations without improvements (convergence is assumed when this is reached)

        self.buffersize = 200
        # buffer for the last few discriminative likelihoods (used to check for convergence)
        self.lastdls = [0] * self.buffersize

        # best discriminative likelihood and corresponding soft labels; updated during training
        # numpy.inf replaces numpy.infty (removed in NumPy 2.0)
        self.bestdl = numpy.inf
        self.bestlbls = []

        # unique id: two random lowercase letters.
        # BUG FIX: `unichr` is Python-2 only (NameError on Python 3); `chr` is the equivalent.
        self.id = str(chr(numpy.random.randint(26) + 97)) + str(chr(numpy.random.randint(26) + 97))
Example 20
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_gives_adversarial_example_linfinity(self):
        """L-infinity attack test; np.inf replaces np.infty (removed in NumPy 2.0)."""
        self.help_generate_np_gives_adversarial_example(np.inf)
Example 21
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_gives_adversarial_example_linfinity(self):
        """Iterative L-infinity attack test; np.inf replaces np.infty (removed in NumPy 2.0)."""
        self.help_generate_np_gives_adversarial_example(ord=np.inf, eps=.5,
                                                        nb_iter=20)
Example 22
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_do_not_reach_lp_boundary(self):
        """
        Make sure that iterative attacks don't reach the boundary of the Lp
        neighbourhood if nb_iter * eps_iter is relatively small compared to
        epsilon.
        """
        # np.inf replaces np.infty (removed in NumPy 2.0)
        for ord in [1, 2, np.inf]:
            _, _, delta = self.generate_adversarial_examples_np(
                ord=ord, eps=.5, nb_iter=10, eps_iter=.01)
            self.assertTrue(np.max(0.5 - delta) > 0.25)
Example 23
Project: OpenFermion-Cirq   Author: quantumlib   File: analysis.py    License: Apache License 2.0 5 votes vote down vote up
def mcweeny_purification(rho: np.ndarray,
                         threshold: Optional[float] = 1e-8) -> np.ndarray:
    """
    Implementation of McWeeny purification.

    Iterates rho <- 3*rho^2 - 2*rho^3, which drives a nearly idempotent
    density matrix toward a projector (fixed points at eigenvalues 0 and 1).

    :param rho: density matrix to purify.
    :param threshold: stop when ||P**2 - P|| falls below this value.
    :return: purified density matrix.
    """
    # np.inf replaces np.infty (removed in NumPy 2.0)
    error = np.inf
    new_rho = rho.copy()
    while error > threshold:
        # hoist the repeated matrix square (was computed three times per step)
        rho_sq = new_rho @ new_rho
        new_rho = 3 * rho_sq - 2 * (rho_sq @ new_rho)
        error = np.linalg.norm(new_rho @ new_rho - new_rho)
    return new_rho
Example 24
def _find_decision_boundary_on_hypersphere(self, centroid, R, penalize_known=False):
        """Search the radius-R hypersphere around ``centroid`` for a point on
        the decision boundary and return it in the original feature space."""
        def objective(phi, grad=0):
            # search on hypersphere surface in polar coordinates - map back to cartesian
            cx = centroid + polar_to_cartesian(phi, R)
            try:
                cx2d = self.dimensionality_reduction.transform([cx])[0]
                error = self.decision_boundary_distance(cx)
                if penalize_known:
                    # slight penalty for being too close to already known decision boundary
                    # keypoints
                    db_distances = [
                        euclidean(cx2d, self.decision_boundary_points_2d[k])
                        for k in range(len(self.decision_boundary_points_2d))
                    ]
                    error += (
                        1e-8
                        * (
                            (self.mean_2d_dist - np.min(db_distances))
                            / self.mean_2d_dist
                        )
                        ** 2
                    )
                return error
            # BUG FIX: was `except (Exception, ex):` -- a Python 2->3
            # conversion artifact that raises NameError (ex undefined)
            # instead of catching; `as` binds the exception correctly.
            except Exception as ex:
                print("Error in objective function:", ex)
                # np.inf replaces np.infty (removed in NumPy 2.0)
                return np.inf

        optimizer = self._get_optimizer(
            D=self.X.shape[1] - 1,
            upper_bound=2 * np.pi,
            iteration_budget=self.hypersphere_iteration_budget,
        )
        optimizer.set_min_objective(objective)
        # random starting angles on the sphere
        db_phi = optimizer.optimize(
            [random.random() * 2 * np.pi for k in range(self.X.shape[1] - 1)]
        )
        db_point = centroid + polar_to_cartesian(db_phi, R)
        return db_point
Example 25
Project: recruit   Author: Frank-qlu   File: interval.py    License: Apache License 2.0 5 votes vote down vote up
def _get_next_label(label):
    """Return the smallest value strictly greater than ``label`` for its dtype.

    One nanosecond for datetime-likes, one step for integers, one ULP
    (``np.nextafter``) for floats.
    Raises TypeError for unsupported label types.
    """
    dtype = getattr(label, 'dtype', type(label))
    if isinstance(label, (Timestamp, Timedelta)):
        dtype = 'datetime64'
    if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return label + np.timedelta64(1, 'ns')
    elif is_integer_dtype(dtype):
        return label + 1
    elif is_float_dtype(dtype):
        # np.inf replaces np.infty (removed in NumPy 2.0)
        return np.nextafter(label, np.inf)
    else:
        raise TypeError('cannot determine next label for type {typ!r}'
                        .format(typ=type(label)))
Example 26
Project: recruit   Author: Frank-qlu   File: interval.py    License: Apache License 2.0 5 votes vote down vote up
def _get_prev_label(label):
    """Return the largest value strictly smaller than ``label`` for its dtype.

    One nanosecond for datetime-likes, one step for integers, one ULP
    (``np.nextafter``) for floats.
    Raises TypeError for unsupported label types.
    """
    dtype = getattr(label, 'dtype', type(label))
    if isinstance(label, (Timestamp, Timedelta)):
        dtype = 'datetime64'
    if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return label - np.timedelta64(1, 'ns')
    elif is_integer_dtype(dtype):
        return label - 1
    elif is_float_dtype(dtype):
        # np.inf replaces np.infty (removed in NumPy 2.0)
        return np.nextafter(label, -np.inf)
    else:
        # BUG FIX: message said "next label" (copy-paste from _get_next_label)
        raise TypeError('cannot determine prev label for type {typ!r}'
                        .format(typ=type(label)))
Example 27
Project: typhon   Author: atmtools   File: ppath.py    License: MIT License 5 votes vote down vote up
def ppath_field_minmax_posbox(ppath_field):
    """ Return the minimum and maximum of all pos variables of ppath_field

    Parameters:
        ppath_field:
            (Ppath or Workspace)  Propagation path or Workspace with the path

    Returns:
        min(alt), max(alt), min(lat), max(lat), min(lon), max(lon)
    """

    # Accept either a ppath iterable directly or a Workspace that wraps one.
    # Narrowed from a bare `except:` so real errors are no longer swallowed.
    try:
        ppath_field = ppath_field.ppath_field.value
    except AttributeError:
        pass

    # Empty bound: running minima at +inf, maxima at -inf so the first path
    # always tightens them.  np.inf replaces np.infty (removed in NumPy 2.0).
    minalt = minlat = minlon = np.inf
    maxalt = maxlat = maxlon = -np.inf

    for ppath in ppath_field:
        alt, lat, lon, za, aa = ppath.alt_lat_lon_za_aa()

        minalt = min(alt.min(), minalt)
        maxalt = max(alt.max(), maxalt)

        minlat = min(lat.min(), minlat)
        maxlat = max(lat.max(), maxlat)

        minlon = min(lon.min(), minlon)
        maxlon = max(lon.max(), maxlon)

    return minalt, maxalt, minlat, maxlat, minlon, maxlon
Example 28
Project: pybeeswarm   Author: mgymrek   File: beeswarm.py    License: MIT License 5 votes vote down vote up
def swarm(x, xsize=0, ysize=0, colors="black"):
    """
    Implement the swarm arrangement method: offset overlapping points
    perpendicular to the value axis so none are closer than one marker
    diameter.  Returns (offsets * xsize, colors) in the original order.
    """
    gsize = xsize
    dsize = ysize
    out = pandas.DataFrame({"x": [item * 1.0 / dsize for item in x], "y": [0] * len(x), "color": colors, "order": range(len(x))})
    # sort_values(by=...) replaces the long-removed DataFrame.sort_index(by=...)
    # NOTE(review): index labels are kept through the sort while .values[i]
    # below is positional; with unsorted input these can disagree (preserved
    # from the original) -- confirm inputs are pre-sorted.
    out.sort_values(by='x', inplace=True)
    if out.shape[0] > 1:
        for i in range(1, out.shape[0]):
            xi = out["x"].values[i]
            pre = out[0:i]  # previous points
            wh = (abs(xi - pre["x"]) < 1)  # which are potentially overlapping
            if any(wh):
                pre = pre[wh]
                poty_off = pre["x"].apply(lambda x: math.sqrt(1 - (xi - x) ** 2))  # potential y offset
                poty = pandas.Series([0] + (pre["y"] + poty_off).tolist() + (pre["y"] - poty_off).tolist())  # potential y values
                poty_bad = []
                for y in poty:
                    dists = (xi - pre["x"]) ** 2 + (y - pre["y"]) ** 2
                    if any([item < 0.999 for item in dists]): poty_bad.append(True)
                    else: poty_bad.append(False)
                # numpy.inf replaces numpy.infty (removed in NumPy 2.0)
                poty[poty_bad] = numpy.inf
                abs_poty = [abs(item) for item in poty]
                newoffset = poty[abs_poty.index(min(abs_poty))]
                out.loc[i, "y"] = newoffset
            else:
                out.loc[i, "y"] = 0
    # .loc replaces the removed .ix indexer
    out.loc[numpy.isnan(out["x"]), "y"] = numpy.nan
    # Sort to maintain original order
    out.sort_values(by="order", inplace=True)
    return out["y"] * gsize, out["color"]
Example 29
Project: meshrender   Author: BerkeleyAutomation   File: viewer.py    License: Apache License 2.0 5 votes vote down vote up
def _compute_scene_bounds(self):
        """The axis aligned bounds of the scene.

        Returns
        -------
        (2,3) float
            The bounding box with [min, max] coordinates.
        """
        # Empty bound: lb at +inf, ub at -inf so the first vertex always
        # tightens both.  np.inf replaces np.infty (removed in NumPy 2.0).
        lb = np.array([np.inf, np.inf, np.inf])
        ub = -1.0 * np.array([np.inf, np.inf, np.inf])
        for on in self.scene.objects:
            o = self.scene.objects[on]
            poses = [RigidTransform(from_frame=o.T_obj_world.from_frame, to_frame=o.T_obj_world.to_frame)]
            if isinstance(o, InstancedSceneObject):
                # Cheat for instanced objects -- just find the min/max translations and create poses from those
                # Compile translations
                translations = o.raw_pose_data[3::4,:3]
                min_trans = np.min(translations, axis=0)
                max_trans = np.max(translations, axis=0)
                poses = [RigidTransform(translation=min_trans),
                         RigidTransform(translation=max_trans)]
            for pose in poses:
                # object frame -> pose -> world frame for every vertex
                tf_verts = pose.matrix[:3,:3].dot(o.mesh.vertices.T).T + pose.matrix[:3,3]
                tf_verts = o.T_obj_world.matrix[:3,:3].dot(tf_verts.T).T + o.T_obj_world.matrix[:3,3]
                lb_mesh = np.min(tf_verts, axis=0)
                ub_mesh = np.max(tf_verts, axis=0)
                lb = np.minimum(lb, lb_mesh)
                ub = np.maximum(ub, ub_mesh)
        # Empty scene: fall back to the unit cube
        if np.any(lb > ub):
            return np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
        return np.array([lb, ub])
Example 30
Project: vnpy_crypto   Author: birforce   File: interval.py    License: MIT License 5 votes vote down vote up
def _get_next_label(label):
    """Return the smallest value strictly greater than ``label`` for its dtype.

    One nanosecond for datetime-likes, one step for integers, one ULP
    (``np.nextafter``) for floats.
    Raises TypeError for unsupported label types.
    """
    dtype = getattr(label, 'dtype', type(label))
    if isinstance(label, (Timestamp, Timedelta)):
        dtype = 'datetime64'
    if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return label + np.timedelta64(1, 'ns')
    elif is_integer_dtype(dtype):
        return label + 1
    elif is_float_dtype(dtype):
        # np.inf replaces np.infty (removed in NumPy 2.0)
        return np.nextafter(label, np.inf)
    else:
        raise TypeError('cannot determine next label for type {typ!r}'
                        .format(typ=type(label)))