Python numpy.argwhere() Examples
The following code examples show how to use numpy.argwhere(). They are extracted from open source projects; the project, author, source file, and license are noted above each example.
You may also want to check out the other available functions and classes of the numpy module.
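As a quick refresher before the examples, here is a minimal, self-contained sketch of what numpy.argwhere() returns: for an n-dimensional condition it yields one row of index coordinates per matching element, and for a 1-D mask the result is usually flattened into plain indices.

import numpy as np

a = np.arange(6).reshape(2, 3)
print(np.argwhere(a > 1))
# [[0 2]
#  [1 0]
#  [1 1]
#  [1 2]]

# For a 1-D boolean mask, the (N, 1) result is commonly flattened:
mask = np.array([False, True, False, True])
print(np.argwhere(mask).flatten())   # [1 3]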
Example 1
Project: libTLDA Author: wmkouw File: tcpr.py License: MIT License

def add_intercept(self, X):
    """Add 1's to data as last features."""
    # Data shape
    N, D = X.shape

    # Check if there's not already an intercept column
    if np.any(np.sum(X, axis=0) == N):
        # Report
        print('Intercept is not the last feature. Swapping..')

        # Find which column contains the intercept
        intercept_index = np.argwhere(np.sum(X, axis=0) == N)

        # Swap intercept to last
        X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

    # Add intercept as last column
    X = np.hstack((X, np.ones((N, 1))))

    # Append column of 1's to data, and increment dimensionality
    return X, D+1
Example 2
Project: PSMNet Author: JiaRenChang File: finetune.py License: MIT License

def test(imgL, imgR, disp_true):
    model.eval()
    imgL = Variable(torch.FloatTensor(imgL))
    imgR = Variable(torch.FloatTensor(imgR))
    if args.cuda:
        imgL, imgR = imgL.cuda(), imgR.cuda()

    with torch.no_grad():
        output3 = model(imgL, imgR)

    pred_disp = output3.data.cpu()

    # computing 3-px error #
    true_disp = disp_true
    index = np.argwhere(true_disp > 0)
    disp_true[index[0][:], index[1][:], index[2][:]] = np.abs(
        true_disp[index[0][:], index[1][:], index[2][:]]
        - pred_disp[index[0][:], index[1][:], index[2][:]])
    correct = ((disp_true[index[0][:], index[1][:], index[2][:]] < 3)
               | (disp_true[index[0][:], index[1][:], index[2][:]]
                  < true_disp[index[0][:], index[1][:], index[2][:]] * 0.05))
    torch.cuda.empty_cache()

    return 1 - (float(torch.sum(correct)) / float(len(index[0])))
Example 3
Project: pyscf Author: pyscf File: ulocal.py License: Apache License 2.0

def lowdinPop(mol, coeff, ova, enorb, occ):
    print('\nLowdin population for LMOs:')
    nb, nc = coeff.shape
    s12 = sqrtm(ova)
    lcoeff = s12.dot(coeff)
    diff = reduce(numpy.dot, (lcoeff.T, lcoeff)) - numpy.identity(nc)
    print('diff=', numpy.linalg.norm(diff))
    pthresh = 0.05
    labels = mol.ao_labels(None)
    nelec = 0.0
    for iorb in range(nc):
        vec = lcoeff[:, iorb]**2
        idx = list(numpy.argwhere(vec > pthresh))
        print(' iorb=', iorb, ' occ=', occ[iorb], ' <i|F|i>=', enorb[iorb])
        for iao in idx:
            print('    iao=', labels[iao], ' pop=', vec[iao])
        nelec += occ[iorb]
    print('nelec=', nelec)
    return 0
Example 4
Project: pyscf Author: pyscf File: linalg_helper.py License: Apache License 2.0

def precond(r, e0, x0):
    idx = numpy.argwhere(abs(x0) > .1).ravel()
    #idx = numpy.arange(20)
    m = idx.size
    if m > 2:
        h0 = a[idx][:, idx] - numpy.eye(m)*e0
        h0x0 = x0 / (a.diagonal() - e0)
        h0x0[idx] = numpy.linalg.solve(h0, h0x0[idx])
        h0r = r / (a.diagonal() - e0)
        h0r[idx] = numpy.linalg.solve(h0, r[idx])
        e1 = numpy.dot(x0, h0r) / numpy.dot(x0, h0x0)
        x1 = (r - e1*x0) / (a.diagonal() - e0)
        x1[idx] = numpy.linalg.solve(h0, (r - e1*x0)[idx])
        return x1
    else:
        return r / (a.diagonal() - e0)
Example 5
Project: pyscf Author: pyscf File: common_slow.py License: Apache License 2.0

def format_mask(x):
    """
    Formats a mask into a readable string.
    Args:
        x (ndarray): an array with the mask;

    Returns:
        A readable string with the mask.
    """
    x = numpy.asanyarray(x)
    if len(x) == 0:
        return "(empty)"
    if x.dtype == bool:
        x = numpy.argwhere(x)[:, 0]
    grps = tuple(list(g) for _, g in groupby(x, lambda n, c=count(): n-next(c)))
    return ",".join("{:d}-{:d}".format(i[0], i[-1]) if len(i) > 1 else "{:d}".format(i[0]) for i in grps)
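The grouping step in Example 5 combines argwhere with an itertools trick: consecutive indices share the same value of index-minus-counter, so groupby collapses them into ranges. Below is a minimal standalone sketch of that idea (not the pyscf implementation), assuming a 1-D boolean mask.

from itertools import count, groupby
import numpy as np

mask = np.array([True, True, True, False, True, True])
idx = np.argwhere(mask)[:, 0]        # -> array([0, 1, 2, 4, 5])
# index minus a running counter is constant within each consecutive run
groups = [list(g) for _, g in groupby(idx, lambda n, c=count(): n - next(c))]
print(groups)                        # -> [[0, 1, 2], [4, 5]]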
Example 6
Project: PathCon Author: hwwang55 File: train.py License: MIT License

def calculate_ranking_metrics(triplets, scores, true_relations):
    for i in range(scores.shape[0]):
        head, tail, relation = triplets[i]
        for j in true_relations[head, tail] - {relation}:
            scores[i, j] -= 1.0

    sorted_indices = np.argsort(-scores, axis=1)
    relations = np.array(triplets)[0:scores.shape[0], 2]
    sorted_indices -= np.expand_dims(relations, 1)
    zero_coordinates = np.argwhere(sorted_indices == 0)
    rankings = zero_coordinates[:, 1] + 1

    mrr = float(np.mean(1 / rankings))
    mr = float(np.mean(rankings))
    hit1 = float(np.mean(rankings <= 1))
    hit3 = float(np.mean(rankings <= 3))
    hit5 = float(np.mean(rankings <= 5))

    return mrr, mr, hit1, hit3, hit5
Example 7
Project: conv-social-pooling Author: nachiket92 File: utils.py License: MIT License

def getHistory(self, vehId, t, refVehId, dsId):
    if vehId == 0:
        return np.empty([0, 2])
    else:
        if self.T.shape[1] <= vehId - 1:
            return np.empty([0, 2])
        refTrack = self.T[dsId-1][refVehId-1].transpose()
        vehTrack = self.T[dsId-1][vehId-1].transpose()
        refPos = refTrack[np.where(refTrack[:, 0] == t)][0, 1:3]

        if vehTrack.size == 0 or np.argwhere(vehTrack[:, 0] == t).size == 0:
            return np.empty([0, 2])
        else:
            stpt = np.maximum(0, np.argwhere(vehTrack[:, 0] == t).item() - self.t_h)
            enpt = np.argwhere(vehTrack[:, 0] == t).item() + 1
            hist = vehTrack[stpt:enpt:self.d_s, 1:3] - refPos

        if len(hist) < self.t_h//self.d_s + 1:
            return np.empty([0, 2])
        return hist

## Helper function to get track future
Example 8
Project: H3DNet Author: zaiweizhang File: pc_util.py License: MIT License

def point_cloud_to_sem_vox(pt, sem_label, vs=0.06, xymin=-3.84, xymax=3.84,
                           zmin=-0.2, zmax=2.68):
    pt[:, 0] = pt[:, 0] - xymin
    pt[:, 1] = pt[:, 1] - xymin
    pt[:, 2] = pt[:, 2] - zmin
    pt = pt/vs
    vxy = int((xymax-xymin)/vs)
    vz = int((zmax-zmin)/vs)
    pt = np.clip(pt, 0, vxy-1)
    pt[:, 2] = np.clip(pt[:, 2], 0, vz-1)
    vol = np.zeros((vxy, vxy, vz), np.float32)
    pt = pt.astype(np.int32)
    for i in range(pt.shape[0]):
        if sem_label[i] not in choose_classes:
            continue
        vol[pt[i, 0], pt[i, 1], pt[i, 2]] = np.argwhere(choose_classes == sem_label[i])[0, 0] + 1
    return vol
Example 9
Project: PoseWarper Author: facebookresearch File: eval_helpers.py License: Apache License 2.0

def VOCap(rec, prec):
    mpre = np.zeros([1, 2+len(prec)])
    mpre[0, 1:len(prec)+1] = prec
    mrec = np.zeros([1, 2+len(rec)])
    mrec[0, 1:len(rec)+1] = rec
    mrec[0, len(rec)+1] = 1.0

    for i in range(mpre.size-2, -1, -1):
        mpre[0, i] = max(mpre[0, i], mpre[0, i+1])

    i = np.argwhere(~np.equal(mrec[0, 1:], mrec[0, :mrec.shape[1]-1])) + 1
    i = i.flatten()

    # compute area under the curve
    ap = np.sum(np.multiply(np.subtract(mrec[0, i], mrec[0, i-1]), mpre[0, i]))

    return ap
Example 10
Project: pytim Author: Marcello-Sega File: chacon_tarazona.py License: GNU General Public License v3.0

def _points_next_to_surface(self, surf, modes, pivot):
    """ Searches for points within a distance self.tau from the
        interface.
    """
    pivot_pos = self.cluster_group[pivot].positions
    z_max = np.max(pivot_pos[:, 2])
    z_min = np.min(pivot_pos[:, 2])
    z_max += self.alpha * 2
    z_min -= self.alpha * 2
    positions = self.cluster_group.positions[:]
    # TODO other directions
    z = positions[:, 2]
    condition = np.logical_and(z > z_min, z < z_max)
    candidates = np.argwhere(condition)[:, 0]
    dists = surf.surface_from_modes(positions[candidates], modes)
    dists = dists - z[candidates]
    return candidates[dists * dists < self.tau**2]
Example 11
Project: gmpe-smtk Author: GEMScienceTools File: residual_plots.py License: GNU Affero General Public License v3.0

def _tojson(*numpy_objs):
    '''Utility function which returns a list where each element of
    numpy_objs is converted to its python equivalent (float or list)'''
    ret = []
    # problem: browsers might not be happy with JSON 'NAN', so convert
    # NaNs to None. Unfortunately, the conversion must be done element wise
    # in numpy (seems not to exist a pandas na filter):
    for obj in numpy_objs:
        isscalar = np.isscalar(obj)
        nan_indices = None if isscalar else \
            np.argwhere(np.isnan(obj)).flatten()
        # note: numpy.float64(N).tolist() returns a python float, so:
        obj = None if isscalar and np.isnan(obj) else obj.tolist()
        if nan_indices is not None:
            for idx in nan_indices:
                obj[idx] = None
        ret.append(obj)
    return ret  # tuple(_.tolist() for _ in numpy_objs)
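Example 11 uses argwhere(np.isnan(...)) to locate NaNs and patch them to None after tolist(), because JSON has no NaN literal. Here is a minimal standalone sketch of that pattern (not the gmpe-smtk function itself):

import json
import numpy as np

arr = np.array([1.0, np.nan, 3.0])
as_list = arr.tolist()                           # python floats, NaN included
for idx in np.argwhere(np.isnan(arr)).flatten():
    as_list[int(idx)] = None                     # None serializes as JSON null
print(json.dumps(as_list))                       # -> [1.0, null, 3.0]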
Example 12
Project: pylops Author: equinor File: Spread.py License: GNU Lesser General Public License v3.0

def _matvec_numpy(self, x):
    x = x.reshape(self.dims)
    y = np.zeros(self.dimsd, dtype=self.dtype)
    for it in range(self.dims[1]):
        for ix0 in range(self.dims[0]):
            if self.usetable:
                indices = self.table[ix0, it]
                if self.interp:
                    dindices = self.dtable[ix0, it]
            else:
                if self.interp:
                    indices, dindices = self.fh(ix0, it)
                else:
                    indices = self.fh(ix0, it)
            mask = np.argwhere(~np.isnan(indices))
            if mask.size > 0:
                indices = (indices[mask]).astype(int)
                if not self.interp:
                    y[mask, indices] += x[ix0, it]
                else:
                    y[mask, indices] += (1 - dindices[mask]) * x[ix0, it]
                    y[mask, indices + 1] += dindices[mask] * x[ix0, it]
    return y.ravel()
Example 13
Project: pylops Author: equinor File: Spread.py License: GNU Lesser General Public License v3.0

def _rmatvec_numpy(self, x):
    x = x.reshape(self.dimsd)
    y = np.zeros(self.dims, dtype=self.dtype)
    for it in range(self.dims[1]):
        for ix0 in range(self.dims[0]):
            if self.usetable:
                indices = self.table[ix0, it]
                if self.interp:
                    dindices = self.dtable[ix0, it]
            else:
                if self.interp:
                    indices, dindices = self.fh(ix0, it)
                else:
                    indices = self.fh(ix0, it)
            mask = np.argwhere(~np.isnan(indices))
            if mask.size > 0:
                indices = (indices[mask]).astype(int)
                if not self.interp:
                    y[ix0, it] = np.sum(x[mask, indices])
                else:
                    y[ix0, it] = \
                        np.sum(x[mask, indices] * (1 - dindices[mask])) + \
                        np.sum(x[mask, indices + 1] * dindices[mask])
    return y.ravel()
Example 14
Project: argus-tgs-salt Author: lRomul File: postprocess.py License: MIT License

def find_points(mask, x_shift=0, y_shift=0):
    # Find points where the mask changes class along the edges
    mask = mask > 0
    mask = mask.astype(int)
    n = mask.shape[1]
    # left, top, right, bottom are edge indices defined elsewhere in the module
    edges = [mask[:, 0+x_shift], mask[:, -1-x_shift],
             mask[0+y_shift, :], mask[-1-y_shift, :]]
    diffs = [np.diff(edge, n=1) for edge in edges]
    pos = [np.argwhere(diff > 0)+1 for diff in diffs]
    neg = [np.argwhere(diff < 0)+1 for diff in diffs]
    pos = [[int(x) for x in p] for p in pos]
    neg = [[int(x) for x in n] for n in neg]
    if mask[0, 0] > 0:
        for i in [left, top]:
            pos[i] = [0] + pos[i]
    if mask[-1, 0] > 0:
        pos[bottom] = [0] + pos[bottom]
        neg[left] = [n] + neg[left]
    if mask[0, -1] > 0:
        pos[right] = [0] + pos[right]
        neg[top] = [n] + neg[top]
    if mask[-1, -1] > 0:
        for i in [right, bottom]:
            neg[i] = [n] + neg[i]
    return(pos, neg)
Example 15
Project: tf-pose Author: SrikanthVelpuri File: ImageView.py License: Apache License 2.0

def timeIndex(self, slider):
    ## Return the time and frame index indicated by a slider
    if self.image is None:
        return (0, 0)

    t = slider.value()

    xv = self.tVals
    if xv is None:
        ind = int(t)
    else:
        if len(xv) < 2:
            return (0, 0)
        totTime = xv[-1] + (xv[-1]-xv[-2])
        inds = np.argwhere(xv < t)
        if len(inds) < 1:
            return (0, t)
        ind = inds[-1, 0]
    return ind, t
Example 16
Project: mars Author: mars-project File: test_base_execute.py License: Apache License 2.0

def testArgwhereExecution(self):
    x = arange(6, chunk_size=2).reshape(2, 3)
    t = argwhere(x > 1)

    res = self.executor.execute_tensor(t, concat=True)[0]
    expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)

    np.testing.assert_array_equal(res, expected)

    data = np.asfortranarray(np.random.rand(10, 20))
    x = tensor(data, chunk_size=10)

    t = argwhere(x > 0.5)

    res = self.executor.execute_tensor(t, concat=True)[0]
    expected = np.argwhere(data > 0.5)
    np.testing.assert_array_equal(res, expected)
    self.assertTrue(res.flags['F_CONTIGUOUS'])
    self.assertFalse(res.flags['C_CONTIGUOUS'])
Example 17
Project: phidl Author: amccaugh File: geometry.py License: MIT License

def _merge_nearby_floating_points(x, tol=1e-10):
    """ Takes an array `x` and merges any values within the tolerance `tol`.
    So if given
    >>> x = [-2, -1, 0, 1.0001, 1.0002, 1.0003, 4, 5, 5.003, 6, 7, 8]
    >>> _merge_nearby_floating_points(x, tol = 1e-3)
    will then return:
    >>> [-2, -1, 0, 1.0001, 1.0001, 1.0001, 4, 5, 5.003, 6, 7, 8]
    """
    xargsort = np.argsort(x)
    xargunsort = np.argsort(xargsort)
    xsort = x[xargsort]
    xsortthreshold = (np.diff(xsort) < tol)
    xsortthresholdind = np.argwhere(xsortthreshold)

    # Merge nearby floating point values
    for xi in xsortthresholdind:
        xsort[xi+1] = xsort[xi]
    return xsort[xargunsort]
Example 18
Project: clock-recovery Author: mossmann File: wpcr.py License: MIT License

def find_clock_frequency(spectrum):
    maxima = scipy.signal.argrelextrema(spectrum, numpy.greater_equal)[0]
    while maxima[0] < 2:
        maxima = maxima[1:]
    if maxima.any():
        threshold = max(spectrum[2:-1])*0.8
        indices_above_threshold = numpy.argwhere(spectrum[maxima] > threshold)
        return maxima[indices_above_threshold[0]]
    else:
        return 0
Example 19
Project: clock-recovery Author: mossmann File: wpcr.py License: MIT License

def wpcr(a):
    if len(a) < 4:
        return []
    b = (a > midpoint(a)) * 1.0
    d = numpy.diff(b)**2
    if len(numpy.argwhere(d > 0)) < 2:
        return []
    f = scipy.fft(d, len(a))
    p = find_clock_frequency(abs(f))
    if p == 0:
        return []
    cycles_per_sample = (p*1.0)/len(f)
    clock_phase = 0.5 + numpy.angle(f[p])/(tau)
    if clock_phase <= 0.5:
        clock_phase += 1
    symbols = []
    for i in range(len(a)):
        if clock_phase >= 1:
            clock_phase -= 1
            symbols.append(a[i])
        clock_phase += cycles_per_sample
    if debug:
        print("peak frequency index: %d / %d" % (p, len(f)))
        print("samples per symbol: %f" % (1.0/cycles_per_sample))
        print("clock cycles per sample: %f" % (cycles_per_sample))
        print("clock phase in cycles between 1st and 2nd samples: %f" % (clock_phase))
        print("clock phase in cycles at 1st sample: %f" % (clock_phase - cycles_per_sample/2))
        print("symbol count: %d" % (len(symbols)))
    return symbols

# convert soft symbols into bits (assuming binary symbols)
Example 20
Project: clock-recovery Author: mossmann File: dsss-bpsk-reverse.py License: MIT License

def find_clock_frequency(spectrum):
    maxima = scipy.signal.argrelextrema(spectrum, numpy.greater_equal)[0]
    while maxima[0] < 3:
        maxima = maxima[1:]
    if maxima.any():
        threshold = max(spectrum[5:-4])*0.8
        indices_above_threshold = numpy.argwhere(spectrum[maxima] > threshold)
        return maxima[indices_above_threshold[0][0]]
    else:
        return 0

# input: complex valued samples
# output: signed FFT bin number of detected frequency
Example 21
Project: OpenFermion-Cirq Author: quantumlib File: random_test.py License: Apache License 2.0

def test_random_interaction_operator_term(order, real, seed):
    op = ofctr.random_interaction_operator_term(order, real, seed)

    assert openfermion.is_hermitian(op)
    assert op.constant == 0
    assert op.one_body_tensor.shape == (order,) * 2
    assert op.two_body_tensor.shape == (order,) * 4

    for tensor in (op.one_body_tensor, op.two_body_tensor):
        for indices in np.argwhere(tensor):
            assert len(set(indices)) == order

    op_2 = ofctr.random_interaction_operator_term(order, real, seed)
    assert op == op_2

    if order == 1:
        assert op.one_body_tensor != 0
        assert op.two_body_tensor != 0
    elif order == 2:
        assert np.all((op.one_body_tensor == 0) == np.eye(2))
    elif order == 3:
        assert np.all(op.one_body_tensor == 0)
    elif order == 4:
        assert np.all(op.one_body_tensor == 0)
    else:
        assert np.all(op.one_body_tensor == 0)
        assert np.all(op.two_body_tensor == 0)
Example 22
Project: cvpr2018-hnd Author: kibok90 File: samplers.py License: MIT License

def balanced_shuffle(labels, num_epochs=50, path=None, start_time=time.time()):
    order_path = '{path}/balanced_order_{num_epochs}.h5' \
                 .format(path=path, num_epochs=num_epochs)
    if path is not None and os.path.isfile(order_path):
        with h5py.File(order_path, 'r') as f:
            order = f['order'][:]
    else:
        evenness = 5  # batch_size | evenness*num_classes
        classes = np.unique(labels.numpy())
        num_classes = len(classes)
        loc_data_per_class = [np.argwhere(labels.numpy() == k).flatten() for k in classes]
        num_data_per_class = [(labels.numpy() == k).sum() for k in classes]
        max_data_per_class = max(num_data_per_class)
        num_loc_split = (max_data_per_class // evenness) * np.ones(evenness, dtype=int)
        num_loc_split[:(max_data_per_class % evenness)] += 1
        loc_split = [0]
        loc_split.extend(np.cumsum(num_loc_split).tolist())
        order = -np.ones([num_epochs, max_data_per_class*num_classes], dtype=int)
        for epoch in range(num_epochs):
            order_e = -np.ones([max_data_per_class, num_classes], dtype=int)
            for k in classes:
                loc_k = np.random.permutation(loc_data_per_class[k])
                for i in range(evenness):
                    loc_i = loc_k[loc_split[i]:loc_split[i+1]]
                    order_e[i:(len(loc_i)*evenness+i):evenness, k] = loc_i
            order[epoch] = order_e.flatten()
            print_freq = min([100, (num_epochs-1) // 5 + 1])
            print_me = (epoch == 0 or epoch == num_epochs-1 or (epoch+1) % print_freq == 0)
            if print_me:
                print('{epoch:4d}/{num_epochs:4d} e; '.format(epoch=epoch+1, num_epochs=num_epochs), end='')
                print('generate balanced random order; {time:8.3f} s'.format(time=time.time()-start_time))
        if path is not None:
            with h5py.File(order_path, 'w') as f:
                f.create_dataset('order', data=order, compression='gzip', compression_opts=9)

    print('balanced random order; {time:8.3f} s'.format(time=time.time()-start_time))
    return torch.from_numpy(order)
Example 23
Project: pruning_yolov3 Author: zbyuan File: prune_utils.py License: GNU General Public License v3.0

def init_weights_from_loose_model(compact_model, loose_model, CBL_idx, Conv_idx, CBLidx2mask):
    for idx in CBL_idx:
        compact_CBL = compact_model.module_list[idx]
        loose_CBL = loose_model.module_list[idx]
        out_channel_idx = np.argwhere(CBLidx2mask[idx])[:, 0].tolist()

        compact_bn, loose_bn = compact_CBL[1], loose_CBL[1]
        compact_bn.weight.data = loose_bn.weight.data[out_channel_idx].clone()
        compact_bn.bias.data = loose_bn.bias.data[out_channel_idx].clone()
        compact_bn.running_mean.data = loose_bn.running_mean.data[out_channel_idx].clone()
        compact_bn.running_var.data = loose_bn.running_var.data[out_channel_idx].clone()

        input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask)
        in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()
        compact_conv, loose_conv = compact_CBL[0], loose_CBL[0]
        tmp = loose_conv.weight.data[:, in_channel_idx, :, :].clone()
        compact_conv.weight.data = tmp[out_channel_idx, :, :, :].clone()

    for idx in Conv_idx:
        compact_conv = compact_model.module_list[idx][0]
        loose_conv = loose_model.module_list[idx][0]

        input_mask = get_input_mask(loose_model.module_defs, idx, CBLidx2mask)
        in_channel_idx = np.argwhere(input_mask)[:, 0].tolist()
        compact_conv.weight.data = loose_conv.weight.data[:, in_channel_idx, :, :].clone()
        compact_conv.bias.data = loose_conv.bias.data.clone()
Example 24
Project: fine-lm Author: akzaidi File: gym_utils.py License: MIT License

def find_ball(obs, default=None):
    ball_area = obs[37:193, :, 0]
    res = np.argwhere(ball_area == 236)
    if res.size == 0:  # no pixel with the ball's color was found
        return default
    else:
        x, y = res[0]
        x += 37
        return x, y
Example 25
Project: fine-lm Author: akzaidi File: gym_utils.py License: MIT License

def find_ball(ob, default=None):
    off_x = 63
    clipped_ob = ob[off_x:-21, :, 0]
    pos = np.argwhere(clipped_ob == 200)

    if not pos.size:
        return default

    x = off_x + pos[0][0]
    y = 0 + pos[0][1]

    return x, y
Example 26
Project: fine-lm Author: akzaidi File: gene_expression.py License: MIT License

def to_example_dict(encoder, inputs, mask, outputs):
    """Convert single h5 record to an example dict."""
    # Inputs
    bases = []
    input_ids = []
    last_idx = -1
    for row in np.argwhere(inputs):
        idx, base_id = row
        idx, base_id = int(idx), int(base_id)
        assert idx > last_idx  # if not, means 2 True values in 1 row

        # Some rows are all False. Those rows are mapped to UNK_ID.
        while idx != last_idx + 1:
            bases.append(encoder.UNK)
            last_idx += 1

        bases.append(encoder.BASES[base_id])
        last_idx = idx
    assert len(inputs) == len(bases)

    input_ids = encoder.encode(bases)
    input_ids.append(text_encoder.EOS_ID)

    # Targets: mask and output
    targets_mask = [float(v) for v in mask]
    # The output is (n, m); store targets_shape so that it can be reshaped
    # properly on the other end.
    targets = [float(v) for v in outputs.flatten()]
    targets_shape = [int(dim) for dim in outputs.shape]
    assert mask.shape[0] == outputs.shape[0]

    example_keys = ["inputs", "targets_mask", "targets", "targets_shape"]
    ex_dict = dict(
        zip(example_keys, [input_ids, targets_mask, targets, targets_shape]))
    return ex_dict
Example 27
Project: interpret-text Author: interpretml File: utils_introspective_rationale.py License: MIT License

def generate_data(batch, use_cuda):
    """Create a formatted and ordered data batch to use in the
    three player model.

    :param batch: A pandas dataframe containing the tokens, masks, counts,
        and labels associated with a batch of data
    :type batch: DataFrame
    :param use_cuda: whether to use CUDA
    :type use_cuda: bool
    :return: formatted and ordered tokens (x), masks (m), and labels (y)
        associated with a batch of data
    :rtype: dict
    """
    # sort for rnn happiness
    batch.sort_values("counts", inplace=True, ascending=False)

    x_mask = np.stack(batch["mask"], axis=0)
    # drop all zero columns
    zero_col_idxs = np.argwhere(np.all(x_mask[..., :] == 0, axis=0))
    x_mask = np.delete(x_mask, zero_col_idxs, axis=1)

    x_mat = np.stack(batch["tokens"], axis=0)
    # drop all zero columns
    x_mat = np.delete(x_mat, zero_col_idxs, axis=1)

    y_vec = np.stack(batch["labels"], axis=0)

    batch_x_ = Variable(torch.from_numpy(x_mat)).to(torch.int64)
    batch_m_ = Variable(torch.from_numpy(x_mask)).type(torch.FloatTensor)
    batch_y_ = Variable(torch.from_numpy(y_vec)).to(torch.int64)

    if use_cuda:
        batch_x_ = batch_x_.cuda()
        batch_m_ = batch_m_.cuda()
        batch_y_ = batch_y_.cuda()

    return {"x": batch_x_, "m": batch_m_, "y": batch_y_}
Example 28
Project: pyscf Author: pyscf File: test_common.py License: Apache License 2.0

def assert_vectors_close(v1, v2, axis=0, threshold=1e-5, atol=1e-8):
    """Compares two vectors up to a phase difference."""
    v1, v2 = remove_phase_difference(v1, v2, axis=axis, threshold=threshold)
    delta = abs(v1 - v2).max(axis=1)
    wrong = delta > atol
    if any(wrong):
        raise AssertionError("Vectors are not close to tolerance atol={}\n\n({:d} roots mismatch)\ndelta {}".format(
            str(atol), sum(wrong),
            ", ".join("#{:d}: {:.3e}".format(i, delta[i]) for i in numpy.argwhere(wrong)[:, 0]),
        ))