Python numpy.infty Examples

The following are 30 code examples of numpy.infty, an alias of numpy.inf (IEEE 754 positive infinity). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
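Before the examples, a minimal sketch of the constant itself (run against NumPy 1.x; the alias was deprecated in NumPy 1.25 and removed in NumPy 2.0 in favor of numpy.inf):

import numpy as np

assert np.infty is np.inf     # infty is simply another name for inf
print(np.infty > 1e308)       # True: greater than any finite float
print(np.isinf(-np.infty))    # True: negation gives negative infinity
print(np.infty - np.infty)    # nan: inf minus inf is undefined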
Example #1
Source File: _testing.py    From mpnum with BSD 3-Clause "New" or "Revised" License
def assert_mpa_identical(mpa1, mpa2, decimal=np.infty):
    """Verify that two MPAs are complety identical
    """
    assert len(mpa1) == len(mpa2)
    assert mpa1.canonical_form == mpa2.canonical_form
    assert mpa1.dtype == mpa2.dtype

    for i, lten1, lten2 in zip(it.count(), mpa1.lt, mpa2.lt):
        if decimal is np.infty:
            assert_array_equal(lten1, lten2,
                               err_msg='mismatch in lten {}'.format(i))
        else:
            assert_array_almost_equal(lten1, lten2, decimal=decimal,
                                      err_msg='mismatch in lten {}'.format(i))
    # TODO: We should make a comprehensive comparison between `mpa1`
    # and `mpa2`.  Are we missing other things? 
Example #2
Source File: test_bayesian_mixture.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_monotonic_likelihood():
    # Check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=20)
    n_components = rand_data.n_components

    for prior_type in PRIOR_TYPE:
        for covar_type in COVARIANCE_TYPE:
            X = rand_data.X[covar_type]
            bgmm = BayesianGaussianMixture(
                weight_concentration_prior_type=prior_type,
                n_components=2 * n_components, covariance_type=covar_type,
                warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
            current_lower_bound = -np.infty
            # Do one training iteration at a time so we can make sure that the
            # training log likelihood increases after each iteration.
            for _ in range(600):
                prev_lower_bound = current_lower_bound
                current_lower_bound = bgmm.fit(X).lower_bound_
                assert_greater_equal(current_lower_bound, prev_lower_bound)

                if bgmm.converged_:
                    break
            assert(bgmm.converged_) 
Example #3
Source File: gmm.py    From cupy with MIT License
def train_gmm(X, max_iter, tol, means, covariances):
    xp = cupy.get_array_module(X)
    lower_bound = -np.infty
    converged = False
    weights = xp.array([0.5, 0.5], dtype=np.float32)
    inv_cov = 1 / xp.sqrt(covariances)

    for n_iter in range(max_iter):
        prev_lower_bound = lower_bound
        log_prob_norm, log_resp = e_step(X, inv_cov, means, weights)
        weights, means, covariances = m_step(X, xp.exp(log_resp))
        inv_cov = 1 / xp.sqrt(covariances)
        lower_bound = log_prob_norm
        change = lower_bound - prev_lower_bound
        if abs(change) < tol:
            converged = True
            break

    if not converged:
        print('Failed to converge. Increase max-iter or tol.')

    return inv_cov, means, weights, covariances 
Example #4
Source File: da.py    From POT with MIT License
def __init__(self, reg_e=1., reg_cl=0.1,
                 max_iter=10, max_inner_iter=200, log=False,
                 tol=10e-9, verbose=False,
                 metric="sqeuclidean", norm=None,
                 distribution_estimation=distribution_estimation_uniform,
                 out_of_sample_map='ferradans', limit_max=np.infty):
        self.reg_e = reg_e
        self.reg_cl = reg_cl
        self.max_iter = max_iter
        self.max_inner_iter = max_inner_iter
        self.tol = tol
        self.log = log
        self.verbose = verbose
        self.metric = metric
        self.norm = norm
        self.distribution_estimation = distribution_estimation
        self.out_of_sample_map = out_of_sample_map
        self.limit_max = limit_max 
Example #5
Source File: test_imexsweeper.py    From pySDC with BSD 2-Clause "Simplified" License
def test_sweepequalmatrix(self):
    for type in classes:
      self.swparams['collocation_class'] = type
      step, level, problem, nnodes = self.setupLevelStepProblem()
      step.levels[0].sweep.predict()
      u0full = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Perform node-to-node SDC sweep
      level.sweep.update_nodes()

      lambdas = [ problem.params.lambda_f[0] , problem.params.lambda_s[0] ]
      LHS, RHS = level.sweep.get_scalar_problems_sweeper_mats( lambdas = lambdas )

      unew = np.linalg.inv(LHS).dot( u0full + RHS.dot(u0full) )
      usweep = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])
      assert np.linalg.norm(unew - usweep, np.infty)<1e-14, "Single SDC sweeps in matrix and node-to-node formulation yield different results"

  #
  # Make sure the implemented update formula matches the matrix update formula
  # 
Example #6
Source File: test_imexsweeper.py    From pySDC with BSD 2-Clause "Simplified" License
def test_updateformula(self):
    for type in classes:
      self.swparams['collocation_class'] = type
      step, level, problem, nnodes = self.setupLevelStepProblem()
      level.sweep.predict()
      u0full = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Perform update step in sweeper
      level.sweep.update_nodes()
      ustages = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])
      # Compute end value through provided function
      level.sweep.compute_end_point()
      uend_sweep = level.uend.values
      # Compute end value from matrix formulation
      if level.sweep.params.do_coll_update:
        uend_mat   = self.pparams['u0'] + step.dt*level.sweep.coll.weights.dot(ustages*(problem.params.lambda_s[0] + problem.params.lambda_f[0]))
      else:
        uend_mat = ustages[-1]
      assert np.linalg.norm(uend_sweep - uend_mat, np.infty)<1e-14, "Update formula in sweeper gives different result than matrix update formula"


  #
  # Compute the exact collocation solution by matrix inversion and make sure it is a fixed point
  # 
Example #7
Source File: gmmfense.py    From platform-resource-manager with Apache License 2.0
def __init__(self, data, max_mixture=10, threshold=0.1):
        """
        Class constructor, arguments include:
            data - data to build GMM model
            max_mixture - max number of Gaussian mixtures
            threshold - probability threshold to determine fense
        """
        self.data = data
        self.thresh = threshold
        lowest_bic = np.infty
        components = 1
        bic = []
        n_components_range = range(1, max_mixture + 1)
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          random_state=1005)
            gmm.fit(data)
            bic.append(gmm.bic(data))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
                components = n_components
        log.debug('best gmm components number: %d, bic %f ', components, lowest_bic)
        self.gmm = best_gmm 
Example #8
Source File: gmmfense.py    From platform-resource-manager with Apache License 2.0
def __init__(self, data, max_mixture=10, threshold=0.1):
        """
        Class constructor, arguments include:
            data - data to build GMM model
            max_mixture - max number of Gaussian mixtures
            threshold - probability threshold to determine fense
        """
        self.data = data
        self.thresh = threshold
        lowest_bic = np.infty
        components = 1
        bic = []
        n_components_range = range(1, max_mixture + 1)
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          random_state=1005)
            gmm.fit(data)
            bic.append(gmm.bic(data))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
                components = n_components
        log.debug('best gmm components number: %d, bic %f ', components, lowest_bic)
        self.gmm = best_gmm 
Example #9
Source File: test_imexsweeper.py    From pySDC with BSD 2-Clause "Simplified" License
def test_updateformula_no_coll_update(self):
    for type in classes:
      self.swparams['collocation_class'] = type
      self.swparams['do_coll_update'] = False
      step, level, problem, nnodes = self.setupLevelStepProblem()
      # if type of nodes does not have right endpoint as quadrature nodes, cannot set do_coll_update to False and perform this test
      if not level.sweep.coll.right_is_node: break
      level.sweep.predict()
      u0full = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Perform update step in sweeper
      level.sweep.update_nodes()
      ustages = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Compute end value through provided function
      level.sweep.compute_end_point()
      uend_sweep = level.uend.values
      # Compute end value from matrix formulation
      q = np.zeros(nnodes)
      q[nnodes-1] = 1.0
      uend_mat   = q.dot(ustages)
      assert np.linalg.norm(uend_sweep - uend_mat, np.infty)<1e-14, "For do_coll_update=False, update formula in sweeper gives different result than matrix update formula with q=(0,..,0,1)" 
Example #10
Source File: test_constants.py    From chainer with MIT License
def test_constants():
    assert chainerx.Inf is numpy.Inf
    assert chainerx.Infinity is numpy.Infinity
    assert chainerx.NAN is numpy.NAN
    assert chainerx.NINF is numpy.NINF
    assert chainerx.NZERO is numpy.NZERO
    assert chainerx.NaN is numpy.NaN
    assert chainerx.PINF is numpy.PINF
    assert chainerx.PZERO is numpy.PZERO
    assert chainerx.e is numpy.e
    assert chainerx.euler_gamma is numpy.euler_gamma
    assert chainerx.inf is numpy.inf
    assert chainerx.infty is numpy.infty
    assert chainerx.nan is numpy.nan
    assert chainerx.newaxis is numpy.newaxis
    assert chainerx.pi is numpy.pi 
Example #11
Source File: cost_sensitive.py    From ALiPy with BSD 3-Clause "New" or "Revised" License
def cal_Informativeness(self, label_index, unlabel_index, models):
        """

        Returns:
        Info : 2d array-like 
        shape [n_unlabel_samples, n_classes]
        Informativeness of each unlabel samples
        """
        Infor = np.zeros((self.n_samples, self.n_classes))
        Uncertainty = self.cal_uncertainty(label_index, unlabel_index, models)
        label_mat = label_index.get_matrix_mask((self.n_samples, self.n_classes), sparse=False)
        unlabel_mat = unlabel_index.get_matrix_mask((self.n_samples, self.n_classes), sparse=False)
        for j in np.arange(self.n_classes):
            j_unlabel = np.where(unlabel_mat[:, j] == 1)[0]
            j_label = np.where(unlabel_mat[:, j] != 1)[0]
            for i in j_unlabel:
                flag = self.cal_relevance(i, j, label_index, models, k=5)
                if flag == 1:
                    Infor[i][j] = Uncertainty[i][j] * 2
                elif flag == -1:
                    Infor[i][j] = Uncertainty[i][j] + self.cal_Udes(i, j, Uncertainty)
            Infor[j_label, j] = -np.infty  # tuple indexing; Infor[j_label][j] would assign into a copy
        return Infor 
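A note on the assignment fixed above: chaining an array index and a scalar index (Infor[j_label][j]) writes into a temporary copy, so the original array is untouched. A minimal demonstration, independent of ALiPy:

import numpy as np

A = np.zeros((3, 3))
rows = np.array([0, 2])
A[rows][1] = 5    # fancy indexing returns a copy; A is unchanged
print(A.sum())    # 0.0
A[rows, 1] = 5    # tuple indexing writes in place
print(A.sum())    # 10.0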
Example #12
Source File: cost_sensitive.py    From ALiPy with BSD 3-Clause "New" or "Revised" License
def cal_uncertainty(self, target, models):
        """Calculate the uncertainty.
        target: unlabel_matrix
        """
        Uncertainty = np.zeros([self.n_samples, self.n_classes])
        # unlabel_data = self.X[unlabel_index, :]
        for j in np.arange(self.n_classes):
            model = models[j]
            j_target = target[:, j]
            j_label = np.where(j_target != 1)
            j_unlabel = np.where(j_target == 1)
            for i in j_unlabel[0]:
                d_v = model.decision_function([self.X[i]])
                Uncertainty[i][j] = np.abs(1 / d_v)
            Uncertainty[j_label, j] = -np.infty
        return Uncertainty 
Example #13
Source File: beam_search.py    From RLSeq2Seq with MIT License
def extend(self, token, log_prob, state, decoder_output, encoder_mask, attn_dist, p_gen, coverage):
    """Return a NEW hypothesis, extended with the information from the latest step of beam search.

    Args:
      token: Integer. Latest token produced by beam search.
      log_prob: Float. Log prob of the latest token.
      state: Current decoder state, a LSTMStateTuple.
      attn_dist: Attention distribution from latest step. Numpy array shape (attn_length).
      p_gen: Generation probability on latest step. Float.
      coverage: Latest coverage vector. Numpy array shape (attn_length), or None if not using coverage.
    Returns:
      New Hypothesis for next step.
    """
    if FLAGS.avoid_trigrams and self._has_trigram(self.tokens + [token]):
        log_prob = -np.infty
    return Hypothesis(tokens = self.tokens + [token],
                      log_probs = self.log_probs + [log_prob],
                      state = state,
                      decoder_output= self.decoder_output + [decoder_output] if decoder_output is not None else [],
                      encoder_mask = self.encoder_mask + [encoder_mask] if encoder_mask is not None else [],
                      attn_dists = self.attn_dists + [attn_dist],
                      p_gens = self.p_gens + [p_gen],
                      coverage = coverage) 
Example #14
Source File: train.py    From B-SOID with GNU General Public License v3.0
def bsoid_hdbscan(umap_embeddings, hdbscan_params=HDBSCAN_PARAMS):
    """
    Trains HDBSCAN (unsupervised) given learned UMAP space
    :param umap_embeddings: 2D array, embedded UMAP space
    :param hdbscan_params: dict, HDBSCAN params in GLOBAL_CONFIG
    :return assignments: HDBSCAN assignments
    """
    highest_numulab = -np.infty
    numulab = []
    min_cluster_range = range(6, 21)
    logging.info('Running HDBSCAN on {} instances in {} D space...'.format(*umap_embeddings.shape))
    for min_c in min_cluster_range:
        trained_classifier = hdbscan.HDBSCAN(prediction_data=True,
                                             min_cluster_size=int(round(0.001 * min_c * umap_embeddings.shape[0])),
                                             **hdbscan_params).fit(umap_embeddings)
        numulab.append(len(np.unique(trained_classifier.labels_)))
        if numulab[-1] > highest_numulab:
            logging.info('Adjusting minimum cluster size to maximize cluster number...')
            highest_numulab = numulab[-1]
            best_clf = trained_classifier
    assignments = best_clf.labels_
    soft_clusters = hdbscan.all_points_membership_vectors(best_clf)
    soft_assignments = np.argmax(soft_clusters, axis=1)
    logging.info('Done predicting labels for {} instances in {} D space...'.format(*umap_embeddings.shape))
    return assignments, soft_clusters, soft_assignments 
Example #15
Source File: diagGMM.py    From sprocket with MIT License
def fit(self, X):
        """Fit GMM parameters to X

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        """
        # initialize
        self._initialize_parameters(X, self.random_state)
        lower_bound = -np.infty

        for n in range(self.n_iter):
            # E-step
            log_prob_norm, log_resp = self._e_step(X)

            # M-step
            self._m_step(X, log_resp)

            # check convergence
            back_lower_bound = lower_bound
            lower_bound = self._compute_lower_bound(
                log_resp, log_prob_norm) 
Example #16
Source File: __init__.py    From sparsereg with MIT License
def crowding_distance(models, *attrs):
    """
    Assumes models are sorted lexicographically.
    """

    get_fit = _get_fit(models, attrs)

    f = np.array(sorted([get_fit(m) for m in models]))

    scale = np.max(f, axis=0) - np.min(f, axis=0)

    with np.errstate(invalid="ignore"):
        dist = np.sum(abs(np.roll(f, 1, axis=0) - np.roll(f, -1, axis=0)) / scale, axis=1)
    dist[0] = np.infty
    dist[-1] = np.infty
    return dist 
Example #17
Source File: intrinsic.py    From opensurfaces with MIT License
def weiss_retinex(image, multi_images, mask, threshold, L1=False):
    multi_images = np.clip(multi_images, 3., np.infty)
    log_multi_images = np.log(multi_images)

    i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
    r_y = np.median(i_y_all, axis=2)
    r_x = np.median(i_x_all, axis=2)

    r_y *= (np.abs(r_y) > threshold)
    r_x *= (np.abs(r_x) > threshold)
    if L1:
        log_refl = poisson.solve_L1(r_y, r_x, mask)
    else:
        log_refl = poisson.solve(r_y, r_x, mask)
    refl = np.where(mask, np.exp(log_refl), 0.)
    shading = np.where(mask, image / refl, 0.)

    return shading, refl
Example #18
Source File: beam_search.py    From TransferRL with MIT License
def extend(self, token, log_prob, state, decoder_output, encoder_mask, attn_dist, p_gen, coverage):
    """Return a NEW hypothesis, extended with the information from the latest step of beam search.

    Args:
      token: Integer. Latest token produced by beam search.
      log_prob: Float. Log prob of the latest token.
      state: Current decoder state, a LSTMStateTuple.
      attn_dist: Attention distribution from latest step. Numpy array shape (attn_length).
      p_gen: Generation probability on latest step. Float.
      coverage: Latest coverage vector. Numpy array shape (attn_length), or None if not using coverage.
    Returns:
      New Hypothesis for next step.
    """
    if FLAGS.avoid_trigrams and self._has_trigram(self.tokens + [token]):
        log_prob = -np.infty
    return Hypothesis(tokens = self.tokens + [token],
                      log_probs = self.log_probs + [log_prob],
                      state = state,
                      decoder_output= self.decoder_output + [decoder_output] if decoder_output is not None else [],
                      encoder_mask = self.encoder_mask + [encoder_mask] if encoder_mask is not None else [],
                      attn_dists = self.attn_dists + [attn_dist],
                      p_gens = self.p_gens + [p_gen],
                      coverage = coverage) 
Example #19
Source File: CPLELearning.py    From semisup-learn with MIT License
def __init__(self, basemodel, pessimistic=True, predict_from_probabilities = False, use_sample_weighting = True, max_iter=3000, verbose = 1):
        self.model = basemodel
        self.pessimistic = pessimistic
        self.predict_from_probabilities = predict_from_probabilities
        self.use_sample_weighting = use_sample_weighting
        self.max_iter = max_iter
        self.verbose = verbose
        
        self.it = 0 # iteration counter
        self.noimprovementsince = 0 # log likelihood hasn't improved since this number of iterations
        self.maxnoimprovementsince = 3 # threshold for iterations without improvements (convergence is assumed when this is reached)
        
        self.buffersize = 200
        # buffer for the last few discriminative likelihoods (used to check for convergence)
        self.lastdls = [0]*self.buffersize
        
        # best discriminative likelihood and corresponding soft labels; updated during training
        self.bestdl = numpy.infty
        self.bestlbls = []
        
        # unique id: two random lowercase letters (unichr is Python 2's chr)
        self.id = str(unichr(numpy.random.randint(26)+97))+str(unichr(numpy.random.randint(26)+97)) 
Example #20
Source File: mice.py    From ycimpute with Apache License 2.0
def __init__(self,
                 visit_sequence='monotone',
                 n_imputations=100,
                 n_burn_in=10,
                 n_pmm_neighbors=5,
                 impute_type='pmm',
                 model=LinearRegression(),
                 n_nearest_columns=np.infty,
                 init_fill_method="mean",
                 min_value=None,
                 max_value=None,
                 verbose=False,
                 normalizer='min_max'):


        Solver.__init__(self,
                        normalizer=normalizer)

        self.visit_sequence = visit_sequence
        self.n_burn_in = n_burn_in
        self.n_pmm_neighbors = n_pmm_neighbors
        self.impute_type = impute_type
        self.model = model
        self.n_nearest_columns = n_nearest_columns
        self.verbose = verbose
        self.fill_method = init_fill_method
        self.min_value = min_value
        self.max_value = max_value
        self.n_imputations = n_imputations 
Example #21
Source File: __init__.py    From e3sm_diags with BSD 3-Clause "New" or "Revised" License
def std(variable, axis='xy'):
    std = -numpy.infty
    try:
        std = float(genutil.statistics.std(
            variable, axis=axis, weights='generate'))
    except Exception as err:
        print(err)

    return std 
Example #22
Source File: gan.py    From ad_examples with MIT License
def fit_gmm(x, val_x, min_k=1, max_k=10):
    cv_type = 'diag'  # ['spherical', 'tied', 'diag', 'full']
    lowest_bic = np.infty
    bic = []
    best_gmm = None
    for k in range(min_k, max_k+1):
        gmm = mixture.GaussianMixture(n_components=k, covariance_type=cv_type)
        gmm.fit(x)
        bic.append(gmm.bic(val_x))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm
    return best_gmm, lowest_bic, bic 
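A quick usage sketch for fit_gmm (hypothetical two-cluster data; assumes numpy as np and sklearn's mixture module are imported as in the snippet):

import numpy as np
from sklearn import mixture

rng = np.random.RandomState(0)
x = np.vstack([rng.normal(0, 1, (200, 2)), rng.normal(5, 1, (200, 2))])
best_gmm, lowest_bic, bic = fit_gmm(x, x, min_k=1, max_k=4)
print(len(bic), lowest_bic)   # one BIC score per k; the lowest selects the model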
Example #23
Source File: Tektronix_AWG7062.py    From qkit with GNU General Public License v2.0
def get_seq_loop(self, position):
        '''
        Get how often the sequencer item at position is looped during playback.
        
        Input:
            position (int) - sequence element index (starting from 1)
        Output: 
            loop count (int)
        '''
        # the instrument reply comes back as a string, so cast before comparing
        if int(self._visainstrument.ask('SEQ:ELEM%d:LOOP:INF?' % position)) == 1:
            return numpy.infty
        else:
            return int(self._visainstrument.ask('SEQ:ELEM%d:LOOP:COUN?'%position)) 
Example #24
Source File: parameterized_truncated_normal_op_test.py    From deep_image_model with Apache License 2.0
def testRightTail(self):
    self.validateMoments([10**5], 0.0, 1.0, 4.0, np.infty) 
Example #25
Source File: electrostatics.py    From electrostatics with GNU General Public License v3.0
def E(self, x):  # pylint: disable=invalid-name
        """Electric field vector.
        Ref: http://www.phys.uri.edu/gerhard/PHY204/tsl31.pdf
        """
        x = array(x)
        x1, x2, lam = self.x1, self.x2, self.lam

        # Get lengths and angles for the different triangles
        theta1, theta2 = angle(x, x1, x2), pi - angle(x, x2, x1)
        a = point_line_distance(x, x1, x2)
        r1, r2 = norm(x - x1), norm(x - x2)

        # Calculate the parallel and perpendicular components
        sign = where(is_left(x, x1, x2), 1, -1)

        # pylint: disable=invalid-name, invalid-unary-operand-type
        Epara = lam*(1/r2-1/r1)
        Eperp = -sign*lam*(cos(theta2)-cos(theta1))/where(a == 0, infty, a)

        # Transform into the coordinate space and return
        dx = x2 - x1

        if len(x.shape) == 2:
            Epara = Epara[::, newaxis]
            Eperp = Eperp[::, newaxis]

        return Eperp * (array([-dx[1], dx[0]])/norm(dx)) + Epara * (dx/norm(dx)) 
Example #26
Source File: train.py    From B-SOID with GNU General Public License v3.0
def bsoid_hdbscan(umap_embeddings, hdbscan_params=HDBSCAN_PARAMS):
    """
    Trains HDBSCAN (unsupervised) given learned UMAP space
    :param umap_embeddings: 2D array, embedded UMAP space
    :param hdbscan_params: dict, HDBSCAN params in GLOBAL_CONFIG
    :return assignments: HDBSCAN assignments
    """
    highest_numulab = -np.infty
    numulab = []
    min_cluster_range = range(6, 21)
    logging.info('Running HDBSCAN on {} instances in {} D space...'.format(*umap_embeddings.shape))
    for min_c in min_cluster_range:
        trained_classifier = hdbscan.HDBSCAN(prediction_data=True,
                                             min_cluster_size=int(round(0.001 * min_c * umap_embeddings.shape[0])),
                                             **hdbscan_params).fit(umap_embeddings)
        numulab.append(len(np.unique(trained_classifier.labels_)))
        if numulab[-1] > highest_numulab:
            logging.info('Adjusting minimum cluster size to maximize cluster number...')
            highest_numulab = numulab[-1]
            best_clf = trained_classifier
    assignments = best_clf.labels_
    soft_clusters = hdbscan.all_points_membership_vectors(best_clf)
    soft_assignments = np.argmax(soft_clusters, axis=1)
    # trained_classifier = hdbscan.HDBSCAN(prediction_data=True,
    #                                      min_cluster_size=round(umap_embeddings.shape[0] * 0.007),  # just < 1%/cluster
    #                                      **hdbscan_params).fit(umap_embeddings)
    # assignments = best_clf.labels_
    logging.info('Done predicting labels for {} instances in {} D space...'.format(*umap_embeddings.shape))
    return assignments, soft_clusters, soft_assignments 
Example #27
Source File: parameterized_truncated_normal_op_test.py    From deep_image_model with Apache License 2.0
def testLeftTail(self):
    self.validateMoments([10**5], 0.0, 1.0, -np.infty, -4.0) 
Example #28
Source File: parameterized_truncated_normal_op_test.py    From deep_image_model with Apache License 2.0
def testRightTailShifted(self):
    self.validateMoments([10**5], -5.0, 1.0, 2.0, np.infty) 
Example #29
Source File: interval.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _get_next_label(label):
    dtype = getattr(label, 'dtype', type(label))
    if isinstance(label, (Timestamp, Timedelta)):
        dtype = 'datetime64'
    if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return label + np.timedelta64(1, 'ns')
    elif is_integer_dtype(dtype):
        return label + 1
    elif is_float_dtype(dtype):
        return np.nextafter(label, np.infty)
    else:
        raise TypeError('cannot determine next label for type {typ!r}'
                        .format(typ=type(label))) 
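For context on the float branch above: np.nextafter(x, np.infty) returns the next representable float above x, so the label is nudged by the smallest possible amount. A minimal illustration:

import numpy as np

print(np.nextafter(1.0, np.infty))    # 1.0000000000000002, the next float up
print(np.nextafter(1.0, -np.infty))   # 0.9999999999999999, the next float down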
Example #30
Source File: interval.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _get_prev_label(label):
    dtype = getattr(label, 'dtype', type(label))
    if isinstance(label, (Timestamp, Timedelta)):
        dtype = 'datetime64'
    if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return label - np.timedelta64(1, 'ns')
    elif is_integer_dtype(dtype):
        return label - 1
    elif is_float_dtype(dtype):
        return np.nextafter(label, -np.infty)
    else:
        raise TypeError('cannot determine previous label for type {typ!r}'
                        .format(typ=type(label)))