Python numpy.unique() Examples
The following are 30 code examples showing how to use numpy.unique(). They are extracted from open source projects; the project, author, source file, and license are listed above each example so you can trace it back to the original code.
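Before the project examples, a short refresher on the function itself may be useful. The sketch below sticks to documented numpy.unique() keywords (return_index, return_counts, axis); the array values are just illustrative.

import numpy as np

a = np.array([2, 1, 2, 3, 1, 1])

np.unique(a)                                          # array([1, 2, 3]), sorted unique values
vals, first_idx = np.unique(a, return_index=True)     # index of each value's first occurrence
vals, counts = np.unique(a, return_counts=True)       # counts -> array([3, 2, 1])

# With axis=0, whole rows are the elements being deduplicated
b = np.array([[0, 1], [0, 1], [2, 3]])
np.unique(b, axis=0)                                  # array([[0, 1], [2, 3]])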
Example 1
Project: libTLDA Author: wmkouw File: test_util.py License: MIT License

def test_one_hot():
    """Check if one_hot returns correct label matrices."""
    # Generate label vector
    y = np.hstack((np.ones((10,))*0, np.ones((10,))*1, np.ones((10,))*2))
    # Map to matrix
    Y, labels = one_hot(y)
    # Check for only 0's and 1's
    assert len(np.setdiff1d(np.unique(Y), [0, 1])) == 0
    # Check for correct labels
    assert np.all(labels == np.unique(y))
    # Check correct shape of matrix
    assert Y.shape[0] == y.shape[0]
    assert Y.shape[1] == len(labels)
Example 2
Project: DDPAE-video-prediction Author: jthsieh File: metrics.py License: MIT License

def find_match(self, pred, gt):
    '''
    Match component to balls.
    '''
    batch_size, n_frames_input, n_components, _ = pred.shape
    diff = pred.reshape(batch_size, n_frames_input, n_components, 1, 2) - \
           gt.reshape(batch_size, n_frames_input, 1, n_components, 2)
    diff = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
    # Direct indices
    indices = np.argmin(diff, axis=2)
    ambiguous = np.zeros(batch_size, dtype=np.int8)
    for i in range(batch_size):
        _, counts = np.unique(indices[i], return_counts=True)
        if not np.all(counts == 1):
            ambiguous[i] = 1
    return indices, ambiguous
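The return_counts check above flags batches in which two predicted components were matched to the same ground-truth ball. A standalone illustration of that check, using made-up indices rather than real model output:

import numpy as np

indices = np.array([0, 2, 2, 1])                    # components 1 and 2 both matched ball 2
_, counts = np.unique(indices, return_counts=True)
ambiguous = not np.all(counts == 1)                 # True: the assignment is not one-to-one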
Example 3
Project: models Author: kipoi File: dataloader_m.py License: MIT License

def prepro_pos_table(pos_tables):
    """Extracts unique positions and sorts them."""
    if not isinstance(pos_tables, list):
        pos_tables = [pos_tables]

    pos_table = None
    for next_pos_table in pos_tables:
        if pos_table is None:
            pos_table = next_pos_table
        else:
            pos_table = pd.concat([pos_table, next_pos_table])
        pos_table = pos_table.groupby('chromo').apply(
            lambda df: pd.DataFrame({'pos': np.unique(df['pos'])}))
        pos_table.reset_index(inplace=True)
        pos_table = pos_table[['chromo', 'pos']]
        pos_table.sort_values(['chromo', 'pos'], inplace=True)

    return pos_table
Example 4
Project: fuku-ml Author: fukuball File: RidgeRegression.py License: MIT License

def init_W(self, mode='normal'):
    self.W = {}

    if (self.status != 'load_train_data') and (self.status != 'train'):
        print("Please load train data first.")
        return self.W

    self.status = 'init'
    self.data_num = len(self.train_Y)
    self.data_demension = len(self.train_X[0])
    self.class_list = list(itertools.combinations(np.unique(self.train_Y), 2))

    for class_item in self.class_list:
        self.W[class_item] = np.zeros(self.data_demension)

    return self.W
Example 5
Project: fuku-ml Author: fukuball File: KernelRidgeRegression.py License: MIT License

def init_W(self, mode='normal'):
    self.W = {}

    if (self.status != 'load_train_data') and (self.status != 'train'):
        print("Please load train data first.")
        return self.W

    self.status = 'init'
    self.data_num = len(self.train_Y)
    self.data_demension = len(self.train_X[0])
    self.class_list = list(itertools.combinations(np.unique(self.train_Y), 2))

    for class_item in self.class_list:
        self.W[class_item] = np.zeros(self.data_demension)

    return self.W
Example 6
Project: fuku-ml Author: fukuball File: SupportVectorMachine.py License: MIT License

def init_W(self, mode='normal'):
    self.W = {}

    if (self.status != 'load_train_data') and (self.status != 'train'):
        print("Please load train data first.")
        return self.W

    self.status = 'init'
    self.data_num = len(self.train_Y)
    self.data_demension = len(self.train_X[0])
    self.class_list = list(itertools.combinations(np.unique(self.train_Y), 2))

    for class_item in self.class_list:
        self.W[class_item] = np.zeros(self.data_demension)

    return self.W
Example 7
Project: fuku-ml Author: fukuball File: LinearRegression.py License: MIT License

def init_W(self, mode='normal'):
    self.W = {}

    if (self.status != 'load_train_data') and (self.status != 'train'):
        print("Please load train data first.")
        return self.W

    self.status = 'init'
    self.data_num = len(self.train_Y)
    self.data_demension = len(self.train_X[0])
    self.class_list = list(itertools.combinations(np.unique(self.train_Y), 2))

    for class_item in self.class_list:
        self.W[class_item] = np.zeros(self.data_demension)

    return self.W
Example 8
Project: neural-combinatorial-optimization-rl-tensorflow Author: MichelDeudon File: dataset.py License: MIT License

def visualize_sampling(self, permutations):
    max_length = len(permutations[0])
    grid = np.zeros([max_length, max_length])  # initialize heatmap grid to 0

    transposed_permutations = np.transpose(permutations)
    for t, cities_t in enumerate(transposed_permutations):  # step t, cities chosen at step t
        city_indices, counts = np.unique(cities_t, return_counts=True, axis=0)
        for u, v in zip(city_indices, counts):
            grid[t][u] += v  # update grid with counts from the batch of permutations

    # plot heatmap
    fig = plt.figure()
    rcParams.update({'font.size': 22})
    ax = fig.add_subplot(1, 1, 1)
    ax.set_aspect('equal')
    plt.imshow(grid, interpolation='nearest', cmap='gray')
    plt.colorbar()
    plt.title('Sampled permutations')
    plt.ylabel('Time t')
    plt.xlabel('City i')
    plt.show()
Example 9
Project: neural-combinatorial-optimization-rl-tensorflow Author: MichelDeudon File: dataset.py License: MIT License

def visualize_sampling(self, permutations):
    max_length = len(permutations[0])
    grid = np.zeros([max_length, max_length])  # initialize heatmap grid to 0

    transposed_permutations = np.transpose(permutations)
    for t, cities_t in enumerate(transposed_permutations):  # step t, cities chosen at step t
        city_indices, counts = np.unique(cities_t, return_counts=True, axis=0)
        for u, v in zip(city_indices, counts):
            grid[t][u] += v  # update grid with counts from the batch of permutations

    # plot heatmap
    fig = plt.figure()
    rcParams.update({'font.size': 22})
    ax = fig.add_subplot(1, 1, 1)
    ax.set_aspect('equal')
    plt.imshow(grid, interpolation='nearest', cmap='gray')
    plt.colorbar()
    plt.title('Sampled permutations')
    plt.ylabel('Time t')
    plt.xlabel('City i')
    plt.show()
Example 10
Project: discomll Author: romanorac File: decision_tree.py License: Apache License 2.0

def rand_indices(x, rand_attr):
    """
    Function randomly selects features without replacement. It is used with random forest.
    Selected features must have more than one distinct value.

    x: numpy array - dataset
    rand_attr - parameter defines number of randomly selected features
    """
    loop = True
    indices = list(range(len(x[0])))  # list, so features can be removed below

    while loop:
        loop = False
        # randomly selected features without replacement
        rand_list = random.sample(indices, rand_attr)
        for i in rand_list:
            if len(np.unique(x[:, i])) == 1:
                loop = True
                indices.remove(i)
                if len(indices) == rand_attr - 1:
                    return -1  # all features in dataset have one distinct value
                break
    return rand_list
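The len(np.unique(...)) == 1 test above is what rejects constant features. A minimal sketch of that test on a made-up dataset (not part of discomll):

import numpy as np

X = np.array([[1, 7, 7],
              [1, 8, 7],
              [1, 9, 7]])
constant = [i for i in range(X.shape[1]) if len(np.unique(X[:, i])) == 1]
# constant -> [0, 2]: these columns have a single distinct value and would be re-sampled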
Example 11
Project: cvpr2018-hnd Author: kibok90 File: test.py License: MIT License

def count_super(p, m, counters, preds, labels, label_to_ch):
    for l in np.unique(labels):
        preds_l = preds[labels == l]
        # in -> known
        if label_to_ch[l]:
            acc = np.zeros_like(preds_l, dtype=bool)
            for c in label_to_ch[l]:
                if p == 0:
                    counters['data'][m][c] += preds_l.shape[0]
                acc |= (preds_l == c)
            acc_sum = acc.sum()
            for c in label_to_ch[l]:
                counters['acc'][p, m][c] += acc_sum
        # out -> novel
        else:
            if p == 0:
                counters['data'][m][-1] += preds_l.shape[0]
            acc_sum = (preds_l < 0).sum()
            counters['acc'][p, m][-1] += acc_sum
Example 12
Project: pruning_yolov3 Author: zbyuan File: utils.py License: GNU General Public License v3.0

def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.3g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt
Example 13
Project: transferlearning Author: jindongwang File: MEDA.py License: MIT License

def estimate_mu(self, _X1, _Y1, _X2, _Y2):
    adist_m = proxy_a_distance(_X1, _X2)
    C = len(np.unique(_Y1))
    epsilon = 1e-3
    list_adist_c = []
    for i in range(1, C + 1):
        ind_i, ind_j = np.where(_Y1 == i), np.where(_Y2 == i)
        Xsi = _X1[ind_i[0], :]
        Xtj = _X2[ind_j[0], :]
        adist_i = proxy_a_distance(Xsi, Xtj)
        list_adist_c.append(adist_i)
    adist_c = sum(list_adist_c) / C
    mu = adist_c / (adist_c + adist_m)
    if mu > 1:
        mu = 1
    if mu < epsilon:
        mu = 0
    return mu
Example 14
Project: sparse-subspace-clustering-python Author: abhinav4192 File: BestMap.py License: MIT License

def BestMap(L1, L2):
    L1 = L1.flatten(order='F').astype(float)
    L2 = L2.flatten(order='F').astype(float)
    if L1.size != L2.size:
        sys.exit('size(L1) must == size(L2)')

    Label1 = np.unique(L1)
    nClass1 = Label1.size
    Label2 = np.unique(L2)
    nClass2 = Label2.size
    nClass = max(nClass1, nClass2)

    # For Hungarian - Label2 are Workers, Label1 are Tasks.
    G = np.zeros([nClass, nClass]).astype(float)
    for i in range(0, nClass2):
        for j in range(0, nClass1):
            G[i, j] = np.sum(np.logical_and(L2 == Label2[i], L1 == Label1[j]))

    c = Hungarian(-G)
    newL2 = np.zeros(L2.shape)
    for i in range(0, nClass2):
        newL2[L2 == Label2[i]] = Label1[c[i]]
    return newL2
Example 15
Project: contextualbandits Author: david-cortes File: online.py License: BSD 2-Clause "Simplified" License

def _add_choices(self, nchoices):
    if isinstance(nchoices, int):
        self.nchoices = nchoices
        self.choice_names = None
    elif isinstance(nchoices, list) or nchoices.__class__.__name__ == "Series" or nchoices.__class__.__name__ == "DataFrame":
        self.choice_names = np.array(nchoices).reshape(-1)
        self.nchoices = self.choice_names.shape[0]
        if np.unique(self.choice_names).shape[0] != self.choice_names.shape[0]:
            raise ValueError("Arm/choice names contain duplicates.")
    elif isinstance(nchoices, np.ndarray):
        self.choice_names = nchoices.reshape(-1)
        self.nchoices = self.choice_names.shape[0]
        if np.unique(self.choice_names).shape[0] != self.choice_names.shape[0]:
            raise ValueError("Arm/choice names contain duplicates.")
    else:
        raise ValueError("'nchoices' must be an integer or list with named arms.")
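The duplicate-name guard relies on np.unique collapsing repeated arm names, so a length mismatch means duplicates. A tiny sketch of that check with made-up arm names:

import numpy as np

choice_names = np.array(["banner_a", "banner_b", "banner_a"])
if np.unique(choice_names).shape[0] != choice_names.shape[0]:
    raise ValueError("Arm/choice names contain duplicates.")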
Example 16
Project: contextualbandits Author: david-cortes File: utils.py License: BSD 2-Clause "Simplified" License

def _partial_fit_single(self, choice, X, a, r):
    yclass, this_choice = self._filter_arm_data(X, a, r, choice)
    if self.smooth is not None:
        self.counters[0, choice] += yclass.shape[0]

    xclass = X[this_choice, :]
    do_full_refit = False
    if self.buffer is not None:
        do_full_refit = self.buffer[choice].do_full_refit()
        xclass, yclass = self.buffer[choice].get_batch(xclass, yclass)

    if (xclass.shape[0] > 0) or self.force_fit:
        if (do_full_refit) and (np.unique(yclass).shape[0] >= 2):
            self.algos[choice].fit(xclass, yclass)
        else:
            self.algos[choice].partial_fit(xclass, yclass, classes=[0, 1])

        ## update the beta counters if needed
        if (self.force_counters):
            self._update_beta_counters(yclass, choice)
Example 17
Project: contextualbandits Author: david-cortes File: utils.py License: BSD 2-Clause "Simplified" License

def fit(self, X, y, *args, **kwargs):
    if X.shape[0] == 0:
        return self
    elif np.unique(y).shape[0] <= 1:
        return self

    self.model.fit(X, y)
    var = self.model.predict_proba(X)[:, 1]
    var = var * (1 - var)
    n = X.shape[1]
    self.Sigma = np.zeros((n + self.fit_intercept, n + self.fit_intercept), dtype=ctypes.c_double)
    X, Xcsr = self._process_X(X)
    _wrapper_double.update_matrices_noinv(
        X,
        np.empty(0, dtype=ctypes.c_double),
        var,
        self.Sigma,
        np.empty(0, dtype=ctypes.c_double),
        Xcsr=Xcsr,
        add_bias=self.fit_intercept,
        overwrite=1
    )
    _matrix_inv_symm(self.Sigma, self.lambda_)
    self.is_fitted = True
Example 18
Project: contextualbandits Author: david-cortes File: utils.py License: BSD 2-Clause "Simplified" License

def fit(self, X, y):
    if X.shape[0] == 0:
        return self
    elif np.unique(y).shape[0] <= 1:
        self.update_aux(y)
        return self

    seed = self.random_state.integers(np.iinfo(np.int32).max)
    self.model.set_params(random_state=seed)
    self.model.fit(X, y)
    n_nodes = self.model.tree_.node_count
    self.pos = np.zeros(n_nodes, dtype=ctypes.c_long)
    self.neg = np.zeros(n_nodes, dtype=ctypes.c_long)
    pred_node = self.model.apply(X).astype(ctypes.c_long)
    _create_node_counters(self.pos, self.neg, pred_node, y.astype(ctypes.c_double))
    self.pos = self.pos.astype(ctypes.c_double) + self.beta_prior[0]
    self.neg = self.neg.astype(ctypes.c_double) + self.beta_prior[1]

    self.is_fitted = True
    return self
Example 19
Project: libTLDA Author: wmkouw File: util.py License: MIT License

def one_hot(y, fill_k=False, one_not=False):
    """Map to one-hot encoding."""
    # Check labels
    labels = np.unique(y)

    # Number of classes
    K = len(labels)

    # Number of samples
    N = y.shape[0]

    # Preallocate array
    if one_not:
        Y = -np.ones((N, K))
    else:
        Y = np.zeros((N, K))

    # Set k-th column to 1 for n-th sample
    for n in range(N):
        # Map current class to index label
        y_n = (y[n] == labels)
        if fill_k:
            Y[n, y_n] = y_n
        else:
            Y[n, y_n] = 1

    return Y, labels
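A hedged usage sketch of this helper, assuming one_hot has been imported from the same util module that Example 1 tests; the label vector is made up:

import numpy as np

y = np.array([0, 0, 1, 2, 2, 2])
Y, labels = one_hot(y)
# labels -> array([0, 1, 2]); Y has shape (6, 3) with exactly one 1 per row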
Example 20
Project: libTLDA Author: wmkouw File: test_iw.py License: MIT License

def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = ImportanceWeightedClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 21
Project: libTLDA Author: wmkouw File: test_tca.py License: MIT License

def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = TransferComponentClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 22
Project: libTLDA Author: wmkouw File: test_suba.py License: MIT License

def test_predict_semi():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    u = np.array([[0, 0], [9, 1]])
    clf = SemiSubspaceAlignedClassifier()
    clf.fit(X, y, Z, u)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 23
Project: libTLDA Author: wmkouw File: test_rba.py License: MIT License

def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = RobustBiasAwareClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 24
Project: libTLDA Author: wmkouw File: test_tcpr.py License: MIT License

def test_predict():
    """Test for making predictions."""
    X = np.vstack((rnd.randn(5, 2), rnd.randn(5, 2)+1))
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = np.vstack((rnd.randn(5, 2)-1, rnd.randn(5, 2)+2))
    clf = TargetContrastivePessimisticClassifier(l2=0.1)
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 25
Project: libTLDA Author: wmkouw File: test_scl.py License: MIT License

def test_init():
    """Test for object type."""
    clf = StructuralCorrespondenceClassifier()
    assert type(clf) == StructuralCorrespondenceClassifier
    assert not clf.is_trained


# def test_fit():
#     """Test for fitting the model."""
#     X = np.vstack((rnd.randn(5, 2), rnd.randn(5, 2)+1))
#     y = np.hstack((np.zeros((5,)), np.ones((5,))))
#     Z = np.vstack((rnd.randn(5, 2)-1, rnd.randn(5, 2)+2))
#     clf = StructuralCorrespondenceClassifier(l2=1.0)
#     clf.fit(X, y, Z)
#     assert clf.is_trained


# def test_predict():
#     """Test for making predictions."""
#     X = np.vstack((rnd.randn(5, 2), rnd.randn(5, 2)+1))
#     y = np.hstack((np.zeros((5,)), np.ones((5,))))
#     Z = np.vstack((rnd.randn(5, 2)-1, rnd.randn(5, 2)+2))
#     clf = StructuralCorrespondenceClassifier(l2=1.0)
#     clf.fit(X, y, Z)
#     u_pred = clf.predict(Z)
#     labels = np.unique(y)
#     assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 26
Project: libTLDA Author: wmkouw File: test_flda.py License: MIT License

def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = FeatureLevelDomainAdaptiveClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 27
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection Author: Sunarker File: ds_utils.py License: MIT License

def unique_boxes(boxes, scale=1.0):
    """Return indices of unique boxes."""
    v = np.array([1, 1e3, 1e6, 1e9])
    hashes = np.round(boxes * scale).dot(v)
    _, index = np.unique(hashes, return_index=True)
    return np.sort(index)
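To see why the dot product with v acts as a hash, consider made-up boxes (not from any dataset): identical rows produce identical scalars, so return_index keeps only the first occurrence of each.

import numpy as np

boxes = np.array([[10, 20, 30, 40],
                  [10, 20, 30, 40],   # exact duplicate of the first box
                  [15, 25, 35, 45]], dtype=float)
v = np.array([1, 1e3, 1e6, 1e9])
hashes = np.round(boxes).dot(v)       # one scalar per box
_, index = np.unique(hashes, return_index=True)
np.sort(index)                        # array([0, 2]): indices of the boxes to keep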
Example 28
Project: mmdetection Author: open-mmlab File: regnet.py License: Apache License 2.0

def generate_regnet(self, initial_width, width_slope, width_parameter, depth,
                    divisor=8):
    """Generates per block width from RegNet parameters.

    Args:
        initial_width ([int]): Initial width of the backbone
        width_slope ([float]): Slope of the quantized linear function
        width_parameter ([int]): Parameter used to quantize the width.
        depth ([int]): Depth of the backbone.
        divisor (int, optional): The divisor of channels. Defaults to 8.

    Returns:
        list, int: return a list of widths of each stage and the number of
            stages
    """
    assert width_slope >= 0
    assert initial_width > 0
    assert width_parameter > 1
    assert initial_width % divisor == 0
    widths_cont = np.arange(depth) * width_slope + initial_width
    ks = np.round(
        np.log(widths_cont / initial_width) / np.log(width_parameter))
    widths = initial_width * np.power(width_parameter, ks)
    widths = np.round(np.divide(widths, divisor)) * divisor
    num_stages = len(np.unique(widths))
    widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
    return widths, num_stages
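The final np.unique call is what turns per-block widths into a stage count: blocks that share a width belong to the same stage. A toy illustration with made-up widths (not taken from any published RegNet configuration):

import numpy as np

widths = np.array([48, 48, 96, 96, 96, 192, 192, 384])   # per-block widths after quantization
num_stages = len(np.unique(widths))                       # 4 distinct widths -> 4 stages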
Example 29
Project: models Author: kipoi File: gtf_utils.py License: MIT License

def get_all_exons(self):
    exons = np.vstack([i.exons for i in self.trans])
    exons = np.unique(exons, axis=0)
    ind = np.lexsort((exons[:, 1], exons[:, 0]))
    if self.strand == '-':
        ind = ind[::-1]
    exons = exons[ind]
    return exons
Example 30
Project: models Author: kipoi File: gtf_utils.py License: MIT License

def get_all_introns(self):
    for j in range(len(self.trans)):
        self.trans[j].add_introns()
    introns = np.vstack([i.introns for i in self.trans])
    introns = np.unique(introns, axis=0)
    ind = np.lexsort((introns[:, 1], introns[:, 0]))
    if self.strand == '-':
        ind = ind[::-1]
    introns = introns[ind]
    return introns