Python numpy.flatnonzero() Examples
The following are 30 code examples showing how to use numpy.flatnonzero(), extracted from open source projects. The originating project, author, source file, and license are listed above each example.
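Before the project examples, here is a minimal usage sketch (the array values are made up for illustration): numpy.flatnonzero(a) returns the indices of the non-zero elements of a's flattened version, equivalent to np.nonzero(np.ravel(a))[0].

import numpy as np

# flatnonzero returns indices of non-zero entries in the flattened array.
x = np.array([[0, 3, 0],
              [2, 0, 1]])
idx = np.flatnonzero(x)                 # array([1, 3, 5])

# Equivalent formulation, per the NumPy docstring quoted in Examples 20/26/28:
idx_equiv = np.nonzero(np.ravel(x))[0]

# Common pattern in the examples below: indices where a boolean condition holds.
cond_idx = np.flatnonzero(x > 1)        # array([1, 3])
print(idx, idx_equiv, cond_idx)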
Example 1
Project: pyberny | Author: jhrmnn | File: coords.py | License: Mozilla Public License 2.0

def get_clusters(C):
    nonassigned = list(range(len(C)))
    clusters = []
    while nonassigned:
        queue = {nonassigned[0]}
        clusters.append([])
        while queue:
            node = queue.pop()
            clusters[-1].append(node)
            nonassigned.remove(node)
            queue.update(n for n in np.flatnonzero(C[node]) if n in nonassigned)
    C = np.zeros_like(C)
    for cluster in clusters:
        for i in cluster:
            C[i, cluster] = True
    return clusters, C
Example 2
Project: pyberny | Author: jhrmnn | File: Math.py | License: Mozilla Public License 2.0

def pinv(A, log=lambda _: None):
    U, D, V = np.linalg.svd(A)
    thre = 1e3
    thre_log = 1e8
    gaps = D[:-1] / D[1:]
    try:
        n = np.flatnonzero(gaps > thre)[0]
    except IndexError:
        n = len(gaps)
    else:
        gap = gaps[n]
        if gap < thre_log:
            log('Pseudoinverse gap of only: {:.1e}'.format(gap))
    D[n + 1 :] = 0
    D[: n + 1] = 1 / D[: n + 1]
    return U.dot(np.diag(D)).dot(V)
Example 3
Project: lambda-packs | Author: ryfeus | File: compressed.py | License: MIT License

def _minor_reduce(self, ufunc):
    """Reduce nonzeros with a ufunc over the minor axis when non-empty

    Warning: this does not call sum_duplicates()

    Returns
    -------
    major_index : array of ints
        Major indices where nonzero
    value : array of self.dtype
        Reduce result for nonzeros in each major_index
    """
    major_index = np.flatnonzero(np.diff(self.indptr))
    value = ufunc.reduceat(self.data,
                           downcast_intp_index(self.indptr[major_index]))
    return major_index, value

#######################
# Getting and Setting #
#######################
Example 4
Project: vnpy_crypto | Author: birforce | File: test_survfunc.py | License: MIT License

def test_incidence2():
    # Check that the cumulative incidence functions for all competing
    # risks sum to the complementary survival function.

    np.random.seed(2423)

    n = 200
    time = -np.log(np.random.uniform(size=n))
    status = np.random.randint(0, 3, size=n)
    ii = np.argsort(time)
    time = time[ii]
    status = status[ii]

    ci = CumIncidenceRight(time, status)

    statusa = 1*(status >= 1)
    sf = SurvfuncRight(time, statusa)

    x = 1 - sf.surv_prob
    y = (ci.cinc[0] + ci.cinc[1])[np.flatnonzero(statusa)]
    assert_allclose(x, y)
Example 5
Project: vnpy_crypto | Author: birforce | File: bayes_mixed_glm.py | License: MIT License

def vb_elbo_grad(self, vb_mean, vb_sd):
    """
    Returns the gradient of the model's evidence lower bound (ELBO).
    """
    fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
    fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
    tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)

    def h(z):
        u = tm + np.sqrt(tv)*z
        x = np.zeros_like(u)
        ii = np.flatnonzero(u > 0)
        uu = u[ii]
        x[ii] = 1 / (1 + np.exp(-uu))
        ii = np.flatnonzero(u <= 0)
        uu = u[ii]
        x[ii] = np.exp(uu) / (1 + np.exp(uu))
        return -x

    return self.vb_elbo_grad_base(
        h, tm, tv, fep_mean, vcp_mean, vc_mean, fep_sd, vcp_sd, vc_sd)
Example 6
Project: vnpy_crypto | Author: birforce | File: cov_struct.py | License: MIT License

def covariance_matrix(self, endog_expval, index):

    if self.grid:
        return self.covariance_matrix_grid(endog_expval, index)

    j1, j2 = np.tril_indices(len(endog_expval))
    dx = np.abs(self.time[index][j1] - self.time[index][j2])
    ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
    j1 = j1[ii]
    j2 = j2[ii]
    dx = dx[ii]

    cmat = np.eye(len(endog_expval))
    cmat[j1, j2] = self.dep_params[dx - 1]
    cmat[j2, j1] = self.dep_params[dx - 1]
    return cmat, True
Example 7
Project: vnpy_crypto | Author: birforce | File: test_gee.py | License: MIT License

def test_default_time(self):
    # Check that the time defaults work correctly.

    endog, exog, group = load_data("gee_logistic_1.csv")

    # Time values for the autoregressive model
    T = np.zeros(len(endog))
    idx = set(group)
    for ii in idx:
        jj = np.flatnonzero(group == ii)
        T[jj] = lrange(len(jj))

    family = Binomial()
    va = Autoregressive()

    md1 = GEE(endog, exog, group, family=family, cov_struct=va)
    mdf1 = md1.fit()

    md2 = GEE(endog, exog, group, time=T, family=family, cov_struct=va)
    mdf2 = md2.fit()

    assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
    assert_almost_equal(mdf1.standard_errors(),
                        mdf2.standard_errors(), decimal=6)
Example 8
Project: vnpy_crypto | Author: birforce | File: bayes_mi.py | License: MIT License

def update_data(self):
    """
    Gibbs update of the missing data values.
    """
    for ix in self.patterns:

        i = ix[0]

        ix_miss = np.flatnonzero(self.mask[i, :])
        ix_obs = np.flatnonzero(~self.mask[i, :])

        mm = self.mean[ix_miss]
        mo = self.mean[ix_obs]

        voo = self.cov[ix_obs, :][:, ix_obs]
        vmm = self.cov[ix_miss, :][:, ix_miss]
        vmo = self.cov[ix_miss, :][:, ix_obs]

        r = self.data[ix, :][:, ix_obs] - mo
        cm = mm + np.dot(vmo, np.linalg.solve(voo, r.T)).T
        cv = vmm - np.dot(vmo, np.linalg.solve(voo, vmo.T))

        cs = np.linalg.cholesky(cv)
        u = np.random.normal(size=(len(ix), len(ix_miss)))
        self.data[np.ix_(ix, ix_miss)] = cm + np.dot(u, cs.T)
Example 9
Project: Mastering-Elasticsearch-7.0 | Author: PacktPublishing | File: kernel_pca.py | License: MIT License

def transform(self, X):
    """Transform X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    check_is_fitted(self, 'X_fit_')

    # Compute centered gram matrix between X and training data X_fit_
    K = self._centerer.transform(self._get_kernel(X, self.X_fit_))

    # scale eigenvectors (properly account for null-space for dot product)
    non_zeros = np.flatnonzero(self.lambdas_)
    scaled_alphas = np.zeros_like(self.alphas_)
    scaled_alphas[:, non_zeros] = (self.alphas_[:, non_zeros]
                                   / np.sqrt(self.lambdas_[non_zeros]))

    # Project with a scalar product between K and the scaled eigenvectors
    return np.dot(K, scaled_alphas)
Example 10
Project: edm2016 | Author: Knewton | File: node.py | License: Apache License 2.0

def get_data_by_id(self, ids):
    """ Helper for getting current data values from stored identifiers
    :param float|list ids: ids for which data are requested
    :return: the stored ids
    :rtype: np.ndarray
    """
    if self.ids is None:
        raise ValueError("IDs not stored in node {}".format(self.name))
    if self.data is None:
        raise ValueError("No data in node {}".format(self.name))
    ids = np.array(ids, ndmin=1, copy=False)
    found_items = np.in1d(ids, self.ids)
    if not np.all(found_items):
        raise ValueError("Cannot find {} among {}".format(
            ids[np.logical_not(found_items)], self.name))
    idx = np.empty(len(ids), dtype='int')
    for k, this_id in enumerate(ids):
        if self.ids.ndim > 1:
            idx[k] = np.flatnonzero(np.all(self.ids == this_id, axis=1))[0]
        else:
            idx[k] = np.flatnonzero(self.ids == this_id)[0]
    return np.array(self.data, ndmin=1)[idx]
Example 11
Project: edm2016 | Author: Knewton | File: test_linear_operators.py | License: Apache License 2.0

def subset_test(lin_op):
    """ Test that subsetting a linear operator produces the correct outputs.
    :param LinearOperator lin_op: the linear operator
    """
    sub_idx = np.random.rand(lin_op.shape[0], 1) > 0.5
    # make sure at least one element included
    sub_idx[np.random.randint(0, len(sub_idx))] = True
    sub_idx = np.flatnonzero(sub_idx)
    sub_lin_op = undertest.get_subset_lin_op(lin_op, sub_idx)

    # test projection to subset of indices
    x = np.random.randn(lin_op.shape[1], np.random.randint(1, 3))
    np.testing.assert_array_almost_equal(sub_lin_op * x, (lin_op * x)[sub_idx, :])

    # test back projection from subset of indices
    y = np.random.randn(len(sub_idx), np.random.randint(1, 3))
    z = np.zeros((lin_op.shape[0], y.shape[1]))
    z[sub_idx] = y
    np.testing.assert_array_almost_equal(sub_lin_op.rmatvec(y), lin_op.rmatvec(z))
Example 12
Project: edm2016 | Author: Knewton | File: test_node.py | License: Apache License 2.0

def test_get_data_by_id(self):
    dim, data, cpd, ids = self.gen_data()
    node = undertest.Node(name='test node', data=data, cpd=cpd, ids=ids)
    # test setting of ids
    np.testing.assert_array_equal(node.ids, ids)
    # test for one id
    idx = np.random.randint(0, dim)
    np.testing.assert_array_equal(node.get_data_by_id(ids[idx]).ravel(), node.data[idx])
    # test for a random set of ids
    ids_subset = np.random.choice(ids, dim, replace=True)
    np.testing.assert_array_equal(node.get_data_by_id(ids_subset),
                                  [node.data[np.flatnonzero(ids == x)[0]] for x in ids_subset])
    # test for all ids
    self.assertEqual(node.get_all_data_and_ids(),
                     {x: node.get_data_by_id(x) for x in ids})
    # test when data are singleton
    dim, _, cpd, ids = self.gen_data(dim=1)
    node = undertest.Node(name='test node', data=1, cpd=cpd, ids=ids)
    self.assertEqual(node.get_all_data_and_ids(),
                     {x: node.get_data_by_id(x) for x in ids})
Example 13
Project: spotpy | Author: thouska | File: signatures.py | License: MIT License

def fill_nan(data):
    """
    Returns the timeseries where any gaps (represented by NaN) are filled,
    using a linear approximation between the neighbors.
    Gaps at the beginning or end are filled with the first resp. last
    valid entry

    :param data: The timeseries data as a numeric sequence
    :return: The filled timeseries as array
    """
    # All data indices
    x = np.arange(len(data))
    # Valid data indices
    xp = np.flatnonzero(np.isfinite(data))
    # Valid data
    fp = remove_nan(data)
    # Interpolate missing values
    return np.interp(x, xp, fp)
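The remove_nan call above is another helper from the same spotpy module. As a rough, self-contained sketch of the same gap-filling pattern (illustrative only, not the spotpy implementation), the valid values can be pulled directly with the flatnonzero indices:

import numpy as np

def fill_nan_sketch(data):
    # Interpolate across NaN gaps; leading/trailing gaps get the first/last valid value.
    data = np.asarray(data, dtype=float)
    x = np.arange(len(data))                # all indices
    xp = np.flatnonzero(np.isfinite(data))  # indices of valid samples
    fp = data[xp]                           # the valid samples themselves
    return np.interp(x, xp, fp)

# Example: the interior gap is interpolated, the trailing NaN is padded.
print(fill_nan_sketch([1.0, np.nan, 3.0, np.nan]))  # [1. 2. 3. 3.]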
Example 14
Project: graph-neural-networks | Author: alelab-upenn | File: graphTools.py | License: GNU General Public License v3.0

def computeNonzeroRows(S, Nl='all'):
    """
    computeNonzeroRows: Find the position of the nonzero elements of each
        row of a matrix

    Input:

        S (np.array): matrix
        Nl (int or 'all'): number of rows to compute the nonzero elements; if
            'all', then Nl = S.shape[0]. Rows are counted from the top.

    Output:

        nonzeroElements (list): list of size Nl where each element is an array
            of the indices of the nonzero elements of the corresponding row.
    """
    # Find the position of the nonzero elements of each row of the matrix S.
    # Nl = 'all' means for all rows, otherwise, it will be an int.
    if Nl == 'all':
        Nl = S.shape[0]
    assert Nl <= S.shape[0]
    # Save neighborhood variable
    neighborhood = []
    # For each of the selected nodes
    for n in range(Nl):
        neighborhood += [np.flatnonzero(S[n, :])]

    return neighborhood
Example 15
Project: argus-freesound | Author: lRomul | File: metrics.py | License: MIT License

def _one_sample_positive_class_precisions(self, scores, truth):
    """Calculate precisions for each true class for a single sample.

    Args:
        scores: np.array of (num_classes,) giving the individual classifier scores.
        truth: np.array of (num_classes,) bools indicating which classes are true.

    Returns:
        pos_class_indices: np.array of indices of the true classes for this sample.
        pos_class_precisions: np.array of precisions corresponding to each of
            those classes.
    """
    num_classes = scores.shape[0]
    pos_class_indices = np.flatnonzero(truth > 0)
    # Only calculate precisions if there are some true classes.
    if not len(pos_class_indices):
        return pos_class_indices, np.zeros(0)
    # Retrieval list of classes for this sample.
    retrieved_classes = np.argsort(scores)[::-1]
    # class_rankings[top_scoring_class_index] == 0 etc.
    class_rankings = np.zeros(num_classes, dtype=np.int)
    class_rankings[retrieved_classes] = range(num_classes)
    # Which of these is a true label?
    retrieved_class_true = np.zeros(num_classes, dtype=np.bool)
    retrieved_class_true[class_rankings[pos_class_indices]] = True
    # Num hits for every truncated retrieval list.
    retrieved_cumulative_hits = np.cumsum(retrieved_class_true)
    # Precision of retrieval list truncated at each hit, in order of pos_labels.
    precision_at_hits = (
        retrieved_cumulative_hits[class_rankings[pos_class_indices]] /
        (1 + class_rankings[pos_class_indices].astype(np.float)))
    return pos_class_indices, precision_at_hits
Example 16
Project: pyberny | Author: jhrmnn | File: coords.py | License: Mozilla Public License 2.0

def __init__(self, geom, allowed=None, dihedral=True, superweakdih=False):
    self._coords = []
    n = len(geom)
    geom = geom.supercell()
    dist = geom.dist(geom)
    radii = np.array([get_property(sp, 'covalent_radius') for sp in geom.species])
    bondmatrix = dist < 1.3 * (radii[None, :] + radii[:, None])
    self.fragments, C = get_clusters(bondmatrix)
    radii = np.array([get_property(sp, 'vdw_radius') for sp in geom.species])
    shift = 0.0
    C_total = C.copy()
    while not C_total.all():
        bondmatrix |= ~C_total & (dist < radii[None, :] + radii[:, None] + shift)
        C_total = get_clusters(bondmatrix)[1]
        shift += 1.0
    for i, j in combinations(range(len(geom)), 2):
        if bondmatrix[i, j]:
            bond = Bond(i, j, C=C)
            self.append(bond)
    for j in range(len(geom)):
        for i, k in combinations(np.flatnonzero(bondmatrix[j, :]), 2):
            ang = Angle(i, j, k, C=C)
            if ang.eval(geom.coords) > pi / 4:
                self.append(ang)
    if dihedral:
        for bond in self.bonds:
            self.extend(
                get_dihedrals(
                    [bond.i, bond.j],
                    geom.coords,
                    bondmatrix,
                    C,
                    superweak=superweakdih,
                )
            )
    if geom.lattice is not None:
        self._reduce(n)
Example 17
Project: risk-slim | Author: ustunb | File: solution_classes.py | License: BSD 3-Clause "New" or "Revised" License

def compute_objvals(self, get_objval):
    compute_idx = np.flatnonzero(np.isnan(self._objvals))
    self._objvals[compute_idx] = np.array(
        list(map(get_objval, self._solutions[compute_idx, :])))
    return self
Example 18
Project: LearningX | Author: ankonzoid | File: gridworld.py | License: MIT License

def get_action(self, env):
    # Epsilon-greedy agent policy
    if random.uniform(0, 1) < self.epsilon:
        # explore
        return np.random.choice(env.allowed_actions())
    else:
        # exploit on allowed actions
        state = env.state
        actions_allowed = env.allowed_actions()
        Q_s = self.Q[state[0], state[1], actions_allowed]
        actions_greedy = actions_allowed[np.flatnonzero(Q_s == np.max(Q_s))]
        return np.random.choice(actions_greedy)
Example 19
Project: LearningX | Author: ankonzoid | File: multiarmed_bandit.py | License: MIT License

def get_action(self):
    # Epsilon-greedy policy
    if np.random.random() < self.eps:
        # explore
        return np.random.randint(self.nActions)
    else:
        # exploit
        return np.random.choice(np.flatnonzero(self.Q == self.Q.max()))

# Start multi-armed bandit simulation
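The np.random.choice(np.flatnonzero(Q == Q.max())) idiom used in this and the previous LearningX example breaks ties among equally good greedy actions at random, whereas a plain np.argmax would always return the first maximizer. A small standalone sketch with made-up Q values:

import numpy as np

Q = np.array([0.2, 0.7, 0.7, 0.1])         # hypothetical action-value estimates

greedy = np.argmax(Q)                       # always 1 -- deterministic tie handling
candidates = np.flatnonzero(Q == Q.max())   # array([1, 2]) -- all maximizers
action = np.random.choice(candidates)       # picks 1 or 2 uniformly at random
print(greedy, candidates, action)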
Example 20
Project: recruit | Author: Frank-qlu | File: numeric.py | License: Apache License 2.0

def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of a.

    This is equivalent to np.nonzero(np.ravel(a))[0].

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    res : ndarray
        Output array, containing the indices of the elements of `a.ravel()`
        that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> x
    array([-2, -1,  0,  1,  2])
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])

    Use the indices of the non-zero elements as an index array to extract
    these elements:

    >>> x.ravel()[np.flatnonzero(x)]
    array([-2, -1,  1,  2])

    """
    return np.nonzero(np.ravel(a))[0]
Example 21
Project: recruit | Author: Frank-qlu | File: sas_xport.py | License: Apache License 2.0

def _record_count(self):
    """
    Get number of records in file.

    This is maybe suboptimal because we have to seek to the end of the file.

    Side effect: returns file position to record_start.
    """
    self.filepath_or_buffer.seek(0, 2)
    total_records_length = (self.filepath_or_buffer.tell() -
                            self.record_start)

    if total_records_length % 80 != 0:
        warnings.warn("xport file may be corrupted")

    if self.record_length > 80:
        self.filepath_or_buffer.seek(self.record_start)
        return total_records_length // self.record_length

    self.filepath_or_buffer.seek(-80, 2)
    last_card = self.filepath_or_buffer.read(80)
    last_card = np.frombuffer(last_card, dtype=np.uint64)

    # 8 byte blank
    ix = np.flatnonzero(last_card == 2314885530818453536)

    if len(ix) == 0:
        tail_pad = 0
    else:
        tail_pad = 8 * len(ix)

    self.filepath_or_buffer.seek(self.record_start)

    return (total_records_length - tail_pad) // self.record_length
Example 22
Project: pytim | Author: Marcello-Sega | File: itim.py | License: GNU General Public License v3.0

def _append_layers(self, uplow, layer, layers):
    inlayer_indices = np.flatnonzero(self._seen[uplow] == layer + 1)
    inlayer_group = self.cluster_group[inlayer_indices]

    if self.molecular is True:
        # we first select the (unique) residues corresponding
        # to inlayer_group, and then we create group of the
        # atoms belonging to them, with
        # inlayer_group.residues.atoms
        inlayer_group = inlayer_group.residues.atoms

        # now we need the indices within the cluster_group,
        # of the atoms in the molecular layer group;
        # NOTE that from MDAnalysis 0.16, .ids runs from 1->N
        # (was 0->N-1 in 0.15), we use now .indices
        indices = np.flatnonzero(
            np.in1d(self.cluster_group.atoms.indices,
                    inlayer_group.atoms.indices))

        # and update the tagged, sorted atoms
        self._seen[uplow][indices] = layer + 1

    # one of the two layers (upper,lower) or both are empty
    if not inlayer_group:
        raise Exception(messages.EMPTY_LAYER)

    layers.append(inlayer_group)
Example 23
Project: mabalgs | Author: alison-carrera | File: simulator.py | License: Apache License 2.0

def get_rewards(self, arms, chosen_arms):
    """
    This method presents a reward calculation way to ranked mabs.
    :param arms: Arms probability in each rank.
    :param chosen_arms: The selected arms by ranked algorithm.
    :return: The rewarded arm and the value of the reward.
    """
    rewards = []
    velocity_factor = []
    for i in range(len(chosen_arms)):
        rewards.append(arms[i][chosen_arms[i]].draw())
        velocity_factor.append(arms[-1][i].draw())

    rewards = np.asarray(rewards)
    velocity_factor = np.asarray(velocity_factor)

    final_rewards = np.logical_and(rewards, velocity_factor).astype(np.int)

    if final_rewards.sum() > 1:
        non_zero_index = np.flatnonzero(final_rewards)
        element = random.choice(list(non_zero_index))
        return chosen_arms[element], 1
    elif final_rewards.sum() == 1:
        return chosen_arms[final_rewards.tolist().index(1)], 1
    else:
        return -1, 0
Example 24
Project: mars | Author: mars-project | File: test_indexing_execute.py | License: Apache License 2.0

def testFlatnonzeroExecution(self):
    x = arange(-2, 3, chunk_size=2)

    t = flatnonzero(x)

    res = self.executor.execute_tensor(t, concat=True)[0]
    expected = np.flatnonzero(np.arange(-2, 3))

    np.testing.assert_equal(res, expected)
Example 25
Project: lingvo | Author: tensorflow | File: breakdown_metric.py | License: Apache License 2.0

def _FindRecallAtGivenPrecision(precision_recall, precision_level):
    """Find the recall at given precision level.

    Args:
        precision_recall: np.array of shape [n, m, 2] where n is the number of
            classes, m is the number of values in the curve, 2 indexes between
            precision [0] and recall [1]. np.float32.
        precision_level: float32. Selected precision level between (0, 1).
            Typically, this may be 0.95.

    Returns:
        recall: np.array of shape [n] consisting of recall for all classes
            where the values are 0.0 if a given precision is never achieved.
    """
    # The method for computing precision-recall inserts precision = 0.0
    # when a particular recall value has not been achieved. The maximum
    # recall value is therefore the highest recall value when the associated
    # precision > 0.
    assert len(precision_recall.shape) == 3, 'Invalid precision recall curve.'
    assert precision_recall.shape[-1] == 2, 'Invalid precision recall curve.'
    assert precision_level > 0.0, 'Precision must be greater than 0.'
    assert precision_level < 1.0, 'Precision must be less than 1.'

    num_classes = precision_recall.shape[0]

    recall = np.zeros(shape=(num_classes), dtype=np.float32)
    for i in range(num_classes):
        precisions = precision_recall[i, :, 0]
        recalls = precision_recall[i, :, 1]
        indices_at_precision_level = np.flatnonzero(precisions >= precision_level)
        if indices_at_precision_level.size > 0:
            recall[i] = np.max(recalls[indices_at_precision_level])
    return recall
Example 26
Project: lambda-packs | Author: ryfeus | File: numeric.py | License: MIT License

def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of a.

    This is equivalent to np.nonzero(np.ravel(a))[0].

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    res : ndarray
        Output array, containing the indices of the elements of `a.ravel()`
        that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> x
    array([-2, -1,  0,  1,  2])
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])

    Use the indices of the non-zero elements as an index array to extract
    these elements:

    >>> x.ravel()[np.flatnonzero(x)]
    array([-2, -1,  1,  2])

    """
    return np.nonzero(np.ravel(a))[0]
Example 27
Project: ScanSSD | Author: MaliParag | File: stitch_patches_page.py | License: MIT License

def find_blank_rows(image, line_spacing=1):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blank_rows = np.all(gray_image == 255, axis=1)

    im_bw = np.zeros(gray_image.shape)
    im_bw[blank_rows] = 255
    #gray_image[~blank_rows] = 0

    #cv2.imwrite("/home/psm2208/code/eval/test.png", im_bw)

    labeled, ncomponents = ndimage.label(im_bw)

    rows = []
    indices = np.indices(im_bw.shape).T[:, :, [1, 0]]

    line_bbs = ndimage.find_objects(labeled)
    sizes = np.array([[bb.stop - bb.start for bb in line_bb]
                      for line_bb in line_bbs])
    sizes = sizes[:, 0]

    mask = (sizes > line_spacing)

    idx = np.flatnonzero(mask)

    for i in idx:
        labels = (labeled == (i+1))
        pixels = indices[labels.T]
        box = [min(pixels[:, 0]), min(pixels[:, 1]),
               max(pixels[:, 0]), max(pixels[:, 1])]
        rows.append(box)

    return rows
Example 28
Project: vnpy_crypto | Author: birforce | File: numeric.py | License: MIT License

def flatnonzero(a):
    """
    Return indices that are non-zero in the flattened version of a.

    This is equivalent to a.ravel().nonzero()[0].

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    res : ndarray
        Output array, containing the indices of the elements of `a.ravel()`
        that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> x
    array([-2, -1,  0,  1,  2])
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])

    Use the indices of the non-zero elements as an index array to extract
    these elements:

    >>> x.ravel()[np.flatnonzero(x)]
    array([-2, -1,  1,  2])

    """
    return a.ravel().nonzero()[0]
Example 29
Project: vnpy_crypto | Author: birforce | File: hazard_regression.py | License: MIT License

def schoenfeld_residuals(self):
    """
    A matrix containing the Schoenfeld residuals.

    Notes
    -----
    Schoenfeld residuals for censored observations are set to zero.
    """
    surv = self.model.surv
    w_avg = self.weighted_covariate_averages

    # Initialize at NaN since rows that belong to strata with no
    # events have undefined residuals.
    sch_resid = np.nan*np.ones(self.model.exog.shape, dtype=np.float64)

    # Loop over strata
    for stx in range(surv.nstrat):

        uft = surv.ufailt[stx]
        exog_s = surv.exog_s[stx]
        time_s = surv.time_s[stx]
        strat_ix = surv.stratum_rows[stx]

        ii = np.searchsorted(uft, time_s)

        # These subjects are censored after the last event in
        # their stratum, so have empty risk sets and undefined
        # residuals.
        jj = np.flatnonzero(ii < len(uft))

        sch_resid[strat_ix[jj], :] = exog_s[jj, :] - w_avg[stx][ii[jj], :]

    jj = np.flatnonzero(self.model.status == 0)
    sch_resid[jj, :] = np.nan

    return sch_resid
Example 30
Project: vnpy_crypto | Author: birforce | File: test_phreg.py | License: MIT License

def test_post_estimation(self):
    # All regression tests
    np.random.seed(34234)
    time = 50 * np.random.uniform(size=200)
    status = np.random.randint(0, 2, 200).astype(np.float64)
    exog = np.random.normal(size=(200, 4))

    mod = PHReg(time, exog, status)
    rslt = mod.fit()

    mart_resid = rslt.martingale_residuals
    assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)

    w_avg = rslt.weighted_covariate_averages
    assert_allclose(np.abs(w_avg[0]).sum(0),
                    np.r_[7.31008415, 9.77608674, 10.89515885, 13.1106801])

    bc_haz = rslt.baseline_cumulative_hazard
    v = [np.mean(np.abs(x)) for x in bc_haz[0]]
    w = np.r_[23.482841556421608, 0.44149255358417017, 0.68660114081275281]
    assert_allclose(v, w)

    score_resid = rslt.score_residuals
    v = np.r_[0.50924792, 0.4533952, 0.4876718, 0.5441128]
    w = np.abs(score_resid).mean(0)
    assert_allclose(v, w)

    groups = np.random.randint(0, 3, 200)
    mod = PHReg(time, exog, status)
    rslt = mod.fit(groups=groups)

    robust_cov = rslt.cov_params()
    v = [0.00513432, 0.01278423, 0.00810427, 0.00293147]
    w = np.abs(robust_cov).mean(0)
    assert_allclose(v, w, rtol=1e-6)

    s_resid = rslt.schoenfeld_residuals
    ii = np.flatnonzero(np.isfinite(s_resid).all(1))
    s_resid = s_resid[ii, :]
    v = np.r_[0.85154336, 0.72993748, 0.73758071, 0.78599333]
    assert_allclose(np.abs(s_resid).mean(0), v)