Python numpy.setdiff1d() Examples
The following are 28 code examples showing how to use numpy.setdiff1d(). Each is extracted from an open source project; the project, author, source file, and license are listed above each example, so you can follow them back to the original source.
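As a quick refresher before the examples: np.setdiff1d(ar1, ar2) returns the sorted, unique values of ar1 that do not appear in ar2. A minimal sketch:

import numpy as np

a = np.array([1, 2, 3, 2, 4, 1])
b = np.array([3, 4, 5, 6])

# Values of `a` not present in `b`, sorted and deduplicated
print(np.setdiff1d(a, b))  # -> [1 2]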
Example 1
Project: libTLDA Author: wmkouw File: tcpr.py License: MIT License
def add_intercept(self, X):
    """Add 1's to data as last features."""
    # Data shape
    N, D = X.shape

    # Check if there's not already an intercept column
    if np.any(np.sum(X, axis=0) == N):

        # Report
        print('Intercept is not the last feature. Swapping..')

        # Find which column contains the intercept
        intercept_index = np.argwhere(np.sum(X, axis=0) == N)

        # Swap intercept to last
        X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

    # Add intercept as last column
    X = np.hstack((X, np.ones((N, 1))))

    # Append column of 1's to data, and increment dimensionality
    return X, D+1
Example 2
Project: libTLDA Author: wmkouw File: test_util.py License: MIT License
def test_one_hot():
    """Check if one_hot returns correct label matrices."""
    # Generate label vector
    y = np.hstack((np.ones((10,))*0,
                   np.ones((10,))*1,
                   np.ones((10,))*2))

    # Map to matrix
    Y, labels = one_hot(y)

    # Check for only 0's and 1's
    assert len(np.setdiff1d(np.unique(Y), [0, 1])) == 0

    # Check for correct labels
    assert np.all(labels == np.unique(y))

    # Check correct shape of matrix
    assert Y.shape[0] == y.shape[0]
    assert Y.shape[1] == len(labels)
Example 3
Project: manifold-diffusion Author: ducha-aiki File: diffussion.py License: MIT License
def dfs_trunk(sim, A, alpha=0.99, QUERYKNN=10, maxiter=8, K=100, tol=1e-3):
    qsim = sim_kernel(sim).T
    sortidxs = np.argsort(-qsim, axis=1)
    for i in range(len(qsim)):
        qsim[i, sortidxs[i, QUERYKNN:]] = 0
    qsims = sim_kernel(qsim)
    W = sim_kernel(A)
    W = csr_matrix(topK_W(W, K))
    out_ranks = []
    t = time()
    for i in range(qsims.shape[0]):
        qs = qsims[i, :]
        tt = time()
        w_idxs, W_trunk = find_trunc_graph(qs, W, 2)
        Wn = normalize_connection_graph(W_trunk)
        Wnn = eye(Wn.shape[0]) - alpha * Wn
        f, inf = s_linalg.minres(Wnn, qs[w_idxs], tol=tol, maxiter=maxiter)
        ranks = w_idxs[np.argsort(-f.reshape(-1))]
        missing = np.setdiff1d(np.arange(A.shape[1]), ranks)
        out_ranks.append(np.concatenate([ranks.reshape(-1, 1),
                                         missing.reshape(-1, 1)], axis=0))
    # print(time() - t, 'qtime')
    out_ranks = np.concatenate(out_ranks, axis=1)
    return out_ranks
Example 4
Project: dgl Author: dmlc File: utils.py License: Apache License 2.0
def set_diff(ar1, ar2):
    """Find the set difference of two index arrays.
    Return the unique values in ar1 that are not in ar2.

    Parameters
    ----------
    ar1 : utils.Index
        Input index array.
    ar2 : utils.Index
        Input comparison index array.

    Returns
    -------
    setdiff : Array of values in ar1 that are not in ar2.
    """
    ar1_np = ar1.tonumpy()
    ar2_np = ar2.tonumpy()
    setdiff = np.setdiff1d(ar1_np, ar2_np)
    setdiff = toindex(setdiff)
    return setdiff
Example 5
Project: bdol-ml Author: bdol File: data_utils.py License: GNU Lesser General Public License v3.0
def cross_validation_folds(n, k=5):
    if n % k != 0:
        skip = int(np.floor(float(n) / float(k)))
    else:
        skip = n // k

    ind = np.arange(n)
    np.random.shuffle(ind)

    train_ind = dict()
    val_ind = dict()
    for i in range(k):
        if i == k - 1:
            # Use the rest of the examples
            val = ind[skip * i:]
        else:
            val = ind[skip * i:skip * (i + 1)]

        # Training indices are the complement of the held-out validation slice
        train = np.setdiff1d(ind, val)

        val_ind[i] = val
        train_ind[i] = train

    return train_ind, val_ind
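The pattern above — holding out a slice of shuffled indices and recovering the complement with setdiff1d — is a common way to build disjoint train/validation splits. A self-contained sketch of the same idea, with made-up sizes:

import numpy as np

ind = np.arange(10)
np.random.shuffle(ind)

val = ind[:2]                    # hold out the first two shuffled indices
train = np.setdiff1d(ind, val)   # the remaining eight, returned sorted

assert len(np.intersect1d(train, val)) == 0  # the splits are disjoint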
Example 6
Project: viznet Author: GiggleLiu File: circuit.py License: MIT License
def focus(self, lines):
    '''
    focus to target lines

    Args:
        lines (list): the target lines to put up.
    '''
    alllines = range(self.num_bit)
    pin = NodeBrush('pin')
    old_positions = []
    for i in range(self.num_bit):
        old_positions.append(self.gate(pin, i))

    lmap = np.append(lines, np.setdiff1d(alllines, lines))
    self.x += 0.8
    pins = []
    for opos, j in zip(old_positions, lmap):
        pi = Pin(self.get_position(j))
        self.node_dict[j].append(pi)
        self.edge >> (opos, pi)
        pins.append(pi)
    return pins
Example 7
Project: Mastering-Elasticsearch-7.0 Author: PacktPublishing File: bagging.py License: MIT License
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=int)

    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])

        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)
        else:
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))])

            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
                                                 -np.inf)

    return log_proba
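The core trick in this example is using setdiff1d to find the class labels a given estimator never saw during training. A stripped-down illustration, with made-up class counts:

import numpy as np

all_classes = np.arange(5)   # the ensemble's full label set: 0..4
seen = np.array([0, 2, 3])   # classes known to one base estimator

missing = np.setdiff1d(all_classes, seen)
print(missing)  # -> [1 4]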
Example 8
Project: Mastering-Elasticsearch-7.0 Author: PacktPublishing File: label.py License: MIT License
def inverse_transform(self, y):
    """Transform labels back to original encoding.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    y : numpy array of shape [n_samples]
    """
    check_is_fitted(self, 'classes_')
    y = column_or_1d(y, warn=True)
    # inverse transform of empty array is empty array
    if _num_samples(y) == 0:
        return np.array([])

    diff = np.setdiff1d(y, np.arange(len(self.classes_)))
    if len(diff):
        raise ValueError(
            "y contains previously unseen labels: %s" % str(diff))
    y = np.asarray(y)
    return self.classes_[y]
Example 9
Project: libTLDA Author: wmkouw File: tcpr.py License: MIT License
def remove_intercept(self, X):
    """Remove 1's from data as last features."""
    # Data shape
    N, D = X.shape

    # Find which column contains the intercept (a constant column of 1's)
    intercept_index = []
    for d in range(D):
        if np.all(X[:, d] == 1):
            intercept_index.append(d)

    # Remove intercept columns
    X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

    return X, D - len(intercept_index)
Example 10
Project: libTLDA Author: wmkouw File: test_iw.py License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = ImportanceWeightedClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 11
Project: libTLDA Author: wmkouw File: test_tca.py License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = TransferComponentClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 12
Project: libTLDA Author: wmkouw File: test_suba.py License: MIT License
def test_predict_semi():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    u = np.array([[0, 0], [9, 1]])
    clf = SemiSubspaceAlignedClassifier()
    clf.fit(X, y, Z, u)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 13
Project: libTLDA Author: wmkouw File: test_rba.py License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = RobustBiasAwareClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 14
Project: libTLDA Author: wmkouw File: test_tcpr.py License: MIT License
def test_predict():
    """Test for making predictions."""
    X = np.vstack((rnd.randn(5, 2), rnd.randn(5, 2)+1))
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = np.vstack((rnd.randn(5, 2)-1, rnd.randn(5, 2)+2))
    clf = TargetContrastivePessimisticClassifier(l2=0.1)
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 15
Project: libTLDA Author: wmkouw File: test_scl.py License: MIT License
def test_init():
    """Test for object type."""
    clf = StructuralCorrespondenceClassifier()
    assert type(clf) == StructuralCorrespondenceClassifier
    assert not clf.is_trained


# def test_fit():
#     """Test for fitting the model."""
#     X = np.vstack((rnd.randn(5, 2), rnd.randn(5, 2)+1))
#     y = np.hstack((np.zeros((5,)), np.ones((5,))))
#     Z = np.vstack((rnd.randn(5, 2)-1, rnd.randn(5, 2)+2))
#     clf = StructuralCorrespondenceClassifier(l2=1.0)
#     clf.fit(X, y, Z)
#     assert clf.is_trained


# def test_predict():
#     """Test for making predictions."""
#     X = np.vstack((rnd.randn(5, 2), rnd.randn(5, 2)+1))
#     y = np.hstack((np.zeros((5,)), np.ones((5,))))
#     Z = np.vstack((rnd.randn(5, 2)-1, rnd.randn(5, 2)+2))
#     clf = StructuralCorrespondenceClassifier(l2=1.0)
#     clf.fit(X, y, Z)
#     u_pred = clf.predict(Z)
#     labels = np.unique(y)
#     assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 16
Project: libTLDA Author: wmkouw File: test_flda.py License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = FeatureLevelDomainAdaptiveClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
Example 17
Project: fenics-topopt Author: zfergus File: problem.py License: MIT License
def __init__(self, nelx, nely, penal, bc):
    # Problem size
    self.nelx = nelx
    self.nely = nely

    # Max and min stiffness
    self.Emin = 1e-9
    self.Emax = 1.0

    # SIMP penalty
    self.penal = penal

    # dofs:
    self.ndof = 2 * (nelx + 1) * (nely + 1)

    # FE: Build the index vectors for the coo matrix format.
    self.build_indices(nelx, nely)

    # BC's and support (half MBB-beam)
    dofs = np.arange(2 * (nelx + 1) * (nely + 1))
    self.fixed = bc.get_fixed_nodes()
    self.free = np.setdiff1d(dofs, self.fixed)

    # Solution and RHS vectors
    self.f = bc.get_forces()
    self.u = np.zeros(self.f.shape)

    # Per element compliance
    self.ce = np.zeros(nely * nelx)
Example 18
Project: neuropythy Author: noahbenson File: hcp.py License: GNU Affero General Public License v3.0
def _siblings_to_pairs(rs):
    subject_list = [u
                    for v in six.itervalues(rs)
                    for uuu in [[six.iterkeys(v)], six.itervalues(v)]
                    for uu in uuu
                    for u in ([uu] if pimms.is_int(uu) else uu)]
    subject_list = np.unique(subject_list)
    # setup twin numbers so that we can export anonymized twin data (i.e.,
    # files containing twin data but not the subject IDs)
    twin_pairs = {tw: pimms.imm_array(list(sorted(dat)))
                  for tw in ['MZ', 'DZ']
                  for dat in [set([tuple(sorted([k, v]))
                                   for (k, v) in six.iteritems(rs[tw])])]}
    # also get a list of all siblings so we can track who is/isn't related
    siblings = {}
    for s1 in subject_list:
        q = []
        for sibs in six.itervalues(rs):
            if s1 not in sibs:
                continue
            ss = sibs[s1]
            if pimms.is_int(ss):
                ss = [ss]
            for s2 in ss:
                q.append(s2)
        if len(q) > 0:
            siblings[s1] = q
    # Make up a list of all possible unrelated pairs
    unrelated_pairs = []
    for sid in subject_list:
        # find a random subject to pair them with
        urs = np.setdiff1d(subject_list, [sid] + siblings.get(sid, []))
        unrelated_pairs.append([urs, np.full(len(urs), sid)])
    unrelated_pairs = np.unique(np.sort(np.hstack(unrelated_pairs), axis=0), axis=1).T
    unrelated_pairs.setflags(write=False)
    # Having made those unrelated pairs, we can add them to the twin pairs
    twin_pairs['UR'] = unrelated_pairs
    # finally, let's figure out the non-twin siblings:
    sibs = [(k, v) for (k, vv) in six.iteritems(rs['']) for v in vv]
    twin_pairs['SB'] = np.unique(np.sort(sibs, axis=1), axis=0)
    twin_pairs['SB'].setflags(write=False)
    return pyr.pmap({'monozygotic_twins': twin_pairs['MZ'],
                     'dizygotic_twins':   twin_pairs['DZ'],
                     'nontwin_siblings':  twin_pairs['SB'],
                     'unrelated_pairs':   twin_pairs['UR']})
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: test_kvstore.py License: Apache License 2.0
def test_row_sparse_pull():
    kv = init_kv_with_str('row_sparse')
    kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))

    def check_row_sparse_pull(kv, count):
        num_rows = shape[0]
        vals = []
        row_ids = []
        all_row_ids = np.arange(num_rows)
        for i in range(count):
            vals.append(mx.nd.zeros(shape).tostype('row_sparse'))
            row_id = np.random.randint(num_rows, size=num_rows)
            row_ids.append(mx.nd.array(row_id).reshape((2, num_rows//2)))
        row_ids_to_pull = row_ids[0] if len(row_ids) == 1 else row_ids
        vals_to_pull = vals[0] if len(vals) == 1 else vals

        kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
        for val, row_id in zip(vals, row_ids):
            retained = val.asnumpy()
            excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
            for row in range(num_rows):
                expected_val = np.zeros_like(retained[row])
                expected_val += 0 if row in excluded_row_ids else 1
                assert_almost_equal(retained[row], expected_val)

    check_row_sparse_pull(kv, 1)
    check_row_sparse_pull(kv, 4)
Example 20
Project: simnibs Author: simnibs File: electrode_placement.py License: GNU General Public License v3.0
def _inside_complex_polygon(poly, nodes, triangles, holes=[], tol=1e-2):
    ''' Determines the triangles inside a complex polygon '''
    avg_l = np.average(
        np.linalg.norm(nodes[triangles[:, 0]] - nodes[triangles[:, 1]], axis=1))
    bar = np.mean(nodes[triangles], axis=1)
    tr_inside = np.where(_point_inside_polygon(poly, bar, tol=tol*avg_l))[0]
    if len(holes) > 0:
        tr_hole = []
        for h in holes:
            tr_hole.append(
                np.where(_point_inside_polygon(h, bar, tol=tol*avg_l))[0])
        tr_hole = np.unique(np.hstack(tr_hole))
        tr_inside = np.setdiff1d(tr_inside, tr_hole)
    return tr_inside
Example 21
Project: simnibs Author: simnibs File: test_mesh_io.py License: GNU General Public License v3.0
def test_remove_mesh_nodes(self, sphere3_msh):
    removed = sphere3_msh.remove_from_mesh(
        nodes=np.unique(sphere3_msh.elm[sphere3_msh.elm.tag1 == 5])
    )
    assert len(np.setdiff1d([3, 1003, 4], removed.elm.tag1)) == 0
Example 22
Project: simnibs Author: simnibs File: test_mesh_io.py License: GNU General Public License v3.0
def test_remove_mesh_elements(self, sphere3_msh):
    removed = sphere3_msh.remove_from_mesh(
        elements=sphere3_msh.elm.elm_number[sphere3_msh.elm.tag1 == 1005]
    )
    assert len(np.setdiff1d([3, 1003, 4, 1004, 5], removed.elm.tag1)) == 0
Example 23
Project: mabwiser Author: fidelity File: mab.py License: Apache License 2.0
def _validate_fit_args(self, decisions, rewards, contexts) -> NoReturn:
    """Validates argument types for fit and partial_fit functions."""

    # Type check for decisions
    check_true(isinstance(decisions, (list, np.ndarray, pd.Series)),
               TypeError("The decisions should be given as list, numpy array, or pandas series."))

    # Type check for rewards
    check_true(isinstance(rewards, (list, np.ndarray, pd.Series)),
               TypeError("The rewards should be given as list, numpy array, or pandas series."))

    # Type check for contexts --don't use "if contexts" since it's n-dim array
    if contexts is not None:
        MAB._validate_context_type(contexts)

        # Sync contexts data with contextual policy
        check_true(self.is_contextual,
                   TypeError("Fitting contexts data requires context policy or parametric learning policy."))
        check_true((len(decisions) == len(contexts)) or
                   (len(decisions) == 1 and isinstance(contexts, pd.Series)),
                   ValueError("Decisions and contexts should be same length: len(decision) = " +
                              str(len(decisions)) + " vs. len(contexts) = " + str(len(contexts))))
    else:
        check_false(self.is_contextual,
                    TypeError("Fitting contextual policy or parametric learning policy requires contexts data."))

    # Length check for decisions and rewards
    check_true(len(decisions) == len(rewards),
               ValueError("Decisions and rewards should be same length."))

    # Thompson Sampling: works with binary rewards or requires function to convert non-binary rewards
    if isinstance(self.learning_policy, LearningPolicy.ThompsonSampling) and \
            self.learning_policy.binarizer is None:
        check_false(np.setdiff1d(rewards, [0, 0.0, 1, 1.0]).size,
                    ValueError("Thompson Sampling requires binary rewards when binarizer function is not "
                               "provided."))
Example 24
Project: interactive-deep-colorization Author: junyanz File: util.py License: MIT License
def flatten_nd_array(pts_nd, axis=1):
    # Flatten an nd array into a 2d array with a certain axis
    # INPUTS
    #   pts_nd       N0xN1x...xNd array
    #   axis         integer
    # OUTPUTS
    #   pts_flt      prod(N \ N_axis) x N_axis array
    NDIM = pts_nd.ndim
    SHP = np.array(pts_nd.shape)
    nax = np.setdiff1d(np.arange(0, NDIM), np.array((axis)))  # non axis indices
    NPTS = np.prod(SHP[nax])
    axorder = np.concatenate((nax, np.array(axis).flatten()), axis=0)
    pts_flt = pts_nd.transpose((axorder))
    pts_flt = pts_flt.reshape(NPTS, SHP[axis])
    return pts_flt
Example 25
Project: interactive-deep-colorization Author: junyanz File: util.py License: MIT License
def unflatten_2d_array(pts_flt, pts_nd, axis=1, squeeze=False):
    # Unflatten a 2d array with a certain axis
    # INPUTS
    #   pts_flt      prod(N \ N_axis) x M array
    #   pts_nd       N0xN1x...xNd array
    #   axis         integer
    #   squeeze      bool     if true, M=1, squeeze it out
    # OUTPUTS
    #   pts_out      N0xN1x...xNd array
    NDIM = pts_nd.ndim
    SHP = np.array(pts_nd.shape)
    nax = np.setdiff1d(np.arange(0, NDIM), np.array((axis)))  # non axis indices

    if squeeze:
        axorder = nax
        axorder_rev = np.argsort(axorder)
        M = pts_flt.shape[1]
        NEW_SHP = SHP[nax].tolist()
        pts_out = pts_flt.reshape(NEW_SHP)
        pts_out = pts_out.transpose(axorder_rev)
    else:
        axorder = np.concatenate((nax, np.array(axis).flatten()), axis=0)
        axorder_rev = np.argsort(axorder)
        M = pts_flt.shape[1]
        NEW_SHP = SHP[nax].tolist()
        NEW_SHP.append(M)
        pts_out = pts_flt.reshape(NEW_SHP)
        pts_out = pts_out.transpose(axorder_rev)
    return pts_out
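In both utilities above, setdiff1d computes the "non-axis" dimensions that get collapsed during flattening. A small sketch of that step alone, assuming a 3-D array and axis=1:

import numpy as np

pts_nd = np.zeros((2, 3, 4))
axis = 1

# Dimensions other than `axis`; these are flattened together
nax = np.setdiff1d(np.arange(pts_nd.ndim), [axis])
print(nax)                                    # -> [0 2]
print(np.prod(np.array(pts_nd.shape)[nax]))   # -> 8 rows after flattening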
Example 26
Project: paramz Author: sods File: index_operations.py License: BSD 3-Clause "New" or "Revised" License
def remove_indices(arr, to_remove):
    return numpy.setdiff1d(arr, to_remove, True)
Example 27
Project: lingvo Author: tensorflow File: sampling_ops_test.py License: Apache License 2.0
def testZFilter(self, cmethod, nmethod, nalgo):
    b, n, m, k = 1, 10000, 128, 128
    g = tf.Graph()
    with g.as_default():
        points = tf.random.uniform(shape=(b, n, 3))
        points_padding = tf.zeros(shape=(b, n))
        center, center_padding, indices, indices_padding = ops.sample_points(
            points=points,
            points_padding=points_padding,
            num_seeded_points=0,
            center_selector=cmethod,
            neighbor_sampler=nmethod,
            num_centers=m,
            center_z_min=0.25,
            center_z_max=0.75,
            num_neighbors=k,
            max_distance=0.25)
        # Ensure shapes are known at graph construction.
        self.assertListEqual(center.shape.as_list(), [b, m])
        self.assertListEqual(center_padding.shape.as_list(), [b, m])
        self.assertListEqual(indices.shape.as_list(), [b, m, k])
        self.assertListEqual(indices_padding.shape.as_list(), [b, m, k])

    with self.session(graph=g):
        c1, p1 = self.evaluate([center, points])
        c2, p2 = self.evaluate([center, points])

    # With extremely high probability, sampling centers twice should be
    # different.
    self.assertGreater(np.setdiff1d(c1, c2).size, 0)

    # Centers should be filtered by z range.
    self.assertTrue((0.25 <= p1[0, c1[0], 2]).all())
    self.assertTrue((p1[0, c1[0], 2] <= 0.75).all())
    self.assertTrue((0.25 <= p2[0, c2[0], 2]).all())
    self.assertTrue((p2[0, c2[0], 2] <= 0.75).all())
Example 28
Project: lambda-packs Author: ryfeus File: arraysetops.py License: MIT License
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])

    """
    if assume_unique:
        ar1 = np.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
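As the implementation above shows, assume_unique=True skips the internal unique() calls: it only ravels ar1. That is faster, but it is only safe when both inputs really are unique; otherwise duplicates leak through to the result. A small demonstration:

import numpy as np

a = np.array([1, 2, 2, 3])
b = np.array([3])

print(np.setdiff1d(a, b))                      # -> [1 2]   (deduplicated)
print(np.setdiff1d(a, b, assume_unique=True))  # -> [1 2 2] (duplicate kept)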