Python numpy.logical_or() Examples
The following are 30 code examples showing how to use numpy.logical_or(). The examples are extracted from open source projects; the project, author, source file, and license are listed above each example. You may also want to check out all other available functions and classes of the numpy module.
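Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what numpy.logical_or() computes: the element-wise truth value of x1 OR x2, with the usual broadcasting rules, returning a boolean array that is typically used as a mask.

import numpy as np

# Element-wise OR of two boolean arrays.
a = np.array([True, False, True, False])
b = np.array([False, False, True, True])
print(np.logical_or(a, b))        # [ True False  True  True]

# Numeric inputs are interpreted by truth value (non-zero is True),
# and scalars broadcast against arrays.
x = np.array([0, 1, 2, 0])
print(np.logical_or(x, 0))        # [False  True  True False]

# A pattern that recurs in the examples below: combine two masks
# and use the result for boolean indexing.
values = np.array([np.nan, 1.0, -5.0, 2.0])
mask = np.logical_or(np.isnan(values), values < 0)
print(values[~mask])              # [1. 2.]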
Example 1
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: test_utils.py License: Apache License 2.0 | 6 votes |
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
    """Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
    If either the relative or absolute check passes, the arrays are considered equal.
    Including an absolute check resolves issues with the relative check where all
    array values are close to zero.

    Parameters
    ----------
    a : np.ndarray
    b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
    """
    a = np.copy(a)
    b = np.copy(b)
    nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
    a[nan_mask] = 0
    b[nan_mask] = 0
    return almost_equal(a, b, rtol, atol)
Example 2
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: test_utils.py License: Apache License 2.0 | 6 votes |
def assert_almost_equal_ignore_nan(a, b, rtol=None, atol=None, names=('a', 'b')):
    """Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
    If either the relative or absolute check passes, the arrays are considered equal.
    Including an absolute check resolves issues with the relative check where all
    array values are close to zero.

    Parameters
    ----------
    a : np.ndarray
    b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
    """
    a = np.copy(a)
    b = np.copy(b)
    nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
    a[nan_mask] = 0
    b[nan_mask] = 0
    assert_almost_equal(a, b, rtol, atol, names)
Example 3
Project: pyscf Author: pyscf File: test_kproxy_supercell_hf.py License: Apache License 2.0 | 6 votes |
def test_class(self):
    """Tests container behavior."""
    model = kproxy_supercell.TDProxy(self.model_krhf, "hf", [self.k, 1, 1], density_fitting_hf)
    model.nroots = self.td_model_krhf.nroots
    assert not model.fast
    model.kernel()
    testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-5)
    # Test real
    testing.assert_allclose(model.e.imag, 0, atol=1e-8)

    nocc = nvirt = 4
    testing.assert_equal(model.xy.shape, (len(model.e), 2, self.k, self.k, nocc, nvirt))

    # Test only non-degenerate roots
    d = abs(model.e[1:] - model.e[:-1]) < 1e-8
    d = numpy.logical_or(numpy.concatenate(([False], d)), numpy.concatenate((d, [False])))
    d = numpy.logical_not(d)
    assert_vectors_close(self.td_model_krhf.xy[d], model.xy[d], atol=1e-5)
Example 4
Project: hgru4rec Author: mquad File: build_dataset.py License: MIT License | 6 votes |
def make_sessions(data, session_th=30 * 60, is_ordered=False, user_key='user_id', item_key='item_id', time_key='ts'):
    """Assigns session ids to the events in data without grouping keys"""
    if not is_ordered:
        # sort data by user and time
        data.sort_values(by=[user_key, time_key], ascending=True, inplace=True)
    # compute the time difference between queries
    tdiff = np.diff(data[time_key].values)
    # check which of them are bigger than session_th
    split_session = tdiff > session_th
    split_session = np.r_[True, split_session]
    # check when the user changes in the data
    new_user = data['user_id'].values[1:] != data['user_id'].values[:-1]
    new_user = np.r_[True, new_user]
    # a new session starts when at least one of the two conditions is verified
    new_session = np.logical_or(new_user, split_session)
    # compute the session ids
    session_ids = np.cumsum(new_session)
    data['session_id'] = session_ids
    return data
Example 5
Project: tensorflow-XNN Author: ChenglongChen File: main.py License: MIT License | 6 votes |
def load_test_data(chunksize=350000*2):
    types_dict_test = {
        'test_id': 'int32',
        'item_condition_id': 'int32',
        'shipping': 'int8',
        'name': 'str',
        'brand_name': 'str',
        'item_description': 'str',
        'category_name': 'str',
    }
    chunks = pd.read_csv('../input/test.tsv', delimiter='\t',
                         low_memory=True, dtype=types_dict_test, chunksize=chunksize)
    for df in chunks:
        df.rename(columns={"test_id": "id"}, inplace=True)
        df.rename(columns={"item_description": "item_desc"}, inplace=True)
        df["missing_brand_name"] = df["brand_name"].isnull().astype(int)
        df["missing_category_name"] = df["category_name"].isnull().astype(int)
        # raw string avoids invalid-escape warnings for the regex
        missing_ind = np.logical_or(df["item_desc"].isnull(),
                                    df["item_desc"].str.lower().str.contains(r"no\s+description\s+yet"))
        df["missing_item_desc"] = missing_ind.astype(int)
        df["item_desc"][missing_ind] = df["name"][missing_ind]
        yield df
Example 6
Project: feets Author: quatrope File: ext_beyond1_std.py License: MIT License | 6 votes |
def fit(self, magnitude, error):
    n = len(magnitude)
    weighted_mean = np.average(magnitude, weights=1 / error ** 2)

    # Standard deviation with respect to the weighted mean
    var = sum((magnitude - weighted_mean) ** 2)
    std = np.sqrt((1.0 / (n - 1)) * var)

    count = np.sum(
        np.logical_or(
            magnitude > weighted_mean + std,
            magnitude < weighted_mean - std,
        )
    )
    return {"Beyond1Std": float(count) / n}
Example 7
Project: recruit Author: Frank-qlu File: test_ufunc.py License: Apache License 2.0 | 6 votes |
def test_object_logical(self):
    a = np.array([3, None, True, False, "test", ""], dtype=object)

    assert_equal(np.logical_or(a, None),
                 np.array([x or None for x in a], dtype=object))
    assert_equal(np.logical_or(a, True),
                 np.array([x or True for x in a], dtype=object))
    assert_equal(np.logical_or(a, 12),
                 np.array([x or 12 for x in a], dtype=object))
    assert_equal(np.logical_or(a, "blah"),
                 np.array([x or "blah" for x in a], dtype=object))

    assert_equal(np.logical_and(a, None),
                 np.array([x and None for x in a], dtype=object))
    assert_equal(np.logical_and(a, True),
                 np.array([x and True for x in a], dtype=object))
    assert_equal(np.logical_and(a, 12),
                 np.array([x and 12 for x in a], dtype=object))
    assert_equal(np.logical_and(a, "blah"),
                 np.array([x and "blah" for x in a], dtype=object))

    assert_equal(np.logical_not(a),
                 np.array([not x for x in a], dtype=object))

    assert_equal(np.logical_or.reduce(a), 3)
    assert_equal(np.logical_and.reduce(a), None)
Example 8
Project: recruit Author: Frank-qlu File: test_ufunc.py License: Apache License 2.0 | 6 votes |
def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod,
        np.greater, np.greater_equal, np.less, np.less_equal,
        np.equal, np.not_equal]

    a = np.array('1')
    b = 1
    c = np.array([1., 2.])
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
        assert_raises(TypeError, f, c, a)
Example 9
Project: recruit Author: Frank-qlu File: test_umath.py License: Apache License 2.0 | 6 votes |
def test_truth_table_logical(self):
    # 2, 3 and 4 serve as true values
    input1 = [0, 0, 3, 2]
    input2 = [0, 4, 0, 2]

    typecodes = (np.typecodes['AllFloat']
                 + np.typecodes['AllInteger']
                 + '?')     # boolean
    for dtype in map(np.dtype, typecodes):
        arg1 = np.asarray(input1, dtype=dtype)
        arg2 = np.asarray(input2, dtype=dtype)

        # OR
        out = [False, True, True, True]
        for func in (np.logical_or, np.maximum):
            assert_equal(func(arg1, arg2).astype(bool), out)
        # AND
        out = [False, False, False, True]
        for func in (np.logical_and, np.minimum):
            assert_equal(func(arg1, arg2).astype(bool), out)
        # XOR
        out = [False, True, True, False]
        for func in (np.logical_xor, np.not_equal):
            assert_equal(func(arg1, arg2).astype(bool), out)
Example 10
Project: pandas-qt Author: datalyze-solutions File: DataSearch.py License: MIT License | 6 votes |
def indexSearch(self, indexes):
    """Filters the data by a list of indexes.

    Args:
        indexes (list of int): List of index numbers to return.

    Returns:
        list: A list containing all indexes with filtered data. Matches
            will be `True`, the remaining items will be `False`. If the
            dataFrame is empty, an empty list will be returned.

    """
    if not self._dataFrame.empty:
        filter0 = self._dataFrame.index == -9999
        for index in indexes:
            filter1 = self._dataFrame.index == index
            filter0 = np.logical_or(filter0, filter1)
        return filter0
    else:
        return []
Example 11
Project: cwcf Author: jaromiru File: env.py License: MIT License | 6 votes |
def _get_random_batch(self, size, zero_prob):
    idx = np.random.randint(len(self.data_x), size=size)

    x = self.data_x[idx]
    y = self.data_y[idx]
    p = self.hpc_p[idx]
    n = self.data_n[idx]

    m = Environment._random_mask(size, zero_prob) * ~n   # can take only available features
    s = Environment._get_state(x, m)

    a = ~np.logical_or(m, n)                              # available actions
    c = np.sum(a * self.costs * config.FEATURE_FACTOR, axis=1)   # cost of remaining actions

    return (s, x, y, p, c)

#==============================
Example 12
Project: bop_toolkit Author: thodan File: pose_error.py License: MIT License | 6 votes |
def cou_mask(mask_est, mask_gt):
    """Complement over Union of 2D binary masks.

    :param mask_est: hxw ndarray with the estimated mask.
    :param mask_gt: hxw ndarray with the ground-truth mask.
    :return: The calculated error.
    """
    # The original used np.bool, an alias removed in NumPy >= 1.24; the builtin bool is equivalent.
    mask_est_bool = mask_est.astype(bool)
    mask_gt_bool = mask_gt.astype(bool)

    inter = np.logical_and(mask_gt_bool, mask_est_bool)
    union = np.logical_or(mask_gt_bool, mask_est_bool)

    union_count = float(union.sum())
    if union_count > 0:
        e = 1.0 - inter.sum() / union_count
    else:
        e = 1.0
    return e
Example 13
Project: bop_toolkit Author: thodan File: visibility.py License: MIT License | 6 votes |
def estimate_visib_mask_est(d_test, d_est, visib_gt, delta, visib_mode='bop19'):
    """Estimates a mask of the visible object surface in the estimated pose.

    For an explanation of why the visibility mask is calculated differently
    for the estimated and the ground-truth pose, see equation (14) and related
    text in Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16.

    :param d_test: Distance image of a scene in which the visibility is estimated.
    :param d_est: Rendered distance image of the object model in the est. pose.
    :param visib_gt: Visibility mask of the object model in the GT pose (from
        function estimate_visib_mask_gt).
    :param delta: Tolerance used in the visibility test.
    :param visib_mode: See _estimate_visib_mask.
    :return: Visibility mask.
    """
    visib_est = _estimate_visib_mask(d_test, d_est, delta, visib_mode)
    visib_est = np.logical_or(visib_est, np.logical_and(visib_gt, d_est > 0))
    return visib_est
Example 14
Project: auto-alt-text-lambda-api Author: abhisuri97 File: test_ufunc.py License: MIT License | 6 votes |
def test_object_logical(self):
    a = np.array([3, None, True, False, "test", ""], dtype=object)

    assert_equal(np.logical_or(a, None),
                 np.array([x or None for x in a], dtype=object))
    assert_equal(np.logical_or(a, True),
                 np.array([x or True for x in a], dtype=object))
    assert_equal(np.logical_or(a, 12),
                 np.array([x or 12 for x in a], dtype=object))
    assert_equal(np.logical_or(a, "blah"),
                 np.array([x or "blah" for x in a], dtype=object))

    assert_equal(np.logical_and(a, None),
                 np.array([x and None for x in a], dtype=object))
    assert_equal(np.logical_and(a, True),
                 np.array([x and True for x in a], dtype=object))
    assert_equal(np.logical_and(a, 12),
                 np.array([x and 12 for x in a], dtype=object))
    assert_equal(np.logical_and(a, "blah"),
                 np.array([x and "blah" for x in a], dtype=object))

    assert_equal(np.logical_not(a),
                 np.array([not x for x in a], dtype=object))

    assert_equal(np.logical_or.reduce(a), 3)
    assert_equal(np.logical_and.reduce(a), None)
Example 15
Project: auto-alt-text-lambda-api Author: abhisuri97 File: test_ufunc.py License: MIT License | 6 votes |
def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
        ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal,
    #        np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
Example 16
Project: xskillscore Author: raybellwaves File: np_deterministic.py License: Apache License 2.0 | 6 votes |
def _match_nans(a, b, weights):
    """
    Considers missing values pairwise. If a value is missing in a,
    the corresponding value in b is turned to nan, and vice versa.

    Returns
    -------
    a, b, weights : ndarray
        a, b, and weights (if not None) with nans placed at
        pairwise locations.
    """
    if np.isnan(a).any() or np.isnan(b).any():
        # Find pairwise indices in a and b that have nans.
        idx = np.logical_or(np.isnan(a), np.isnan(b))
        a[idx], b[idx] = np.nan, np.nan
        if weights is not None:
            weights = weights.copy()
            weights[idx] = np.nan
    return a, b, weights
Example 17
Project: coded Author: bullocke File: postprocess_utils.py License: MIT License | 6 votes |
def min_max_years(config, image, before):
    """ Exclude data outside of min and max year desired """
    min_year = int(config['postprocessing']['minimum_year'])
    if not min_year:
        min_year = 1980
    max_year = int(config['postprocessing']['maximum_year'])
    if not max_year:
        max_year = 2200
    # The original used np.str and np.float, deprecated aliases for the builtins.
    year_image = image[0, :, :].astype(str).view(np.chararray).ljust(4)
    year_image = np.array(year_image).astype(float)
    bad_indices = np.logical_or(year_image < min_year, year_image > max_year)
    for i in range(image.shape[0] - 1):
        image[i, :, :][bad_indices] = 0
    image[image.shape[0] - 1, :, :][bad_indices] = before[bad_indices]
    return image
Example 18
Project: TheCannon Author: annayqho File: apogee.py License: MIT License | 6 votes |
def get_pixmask(fluxes, flux_errs):
    """ Create and return a bad pixel mask for an APOGEE spectrum

    Bad pixels are defined as follows: fluxes or errors are not finite, or
    reported errors are <= 0, or fluxes are 0

    Parameters
    ----------
    fluxes: ndarray
        Flux data values

    flux_errs: ndarray
        Flux uncertainty data values

    Returns
    -------
    mask: ndarray
        Bad pixel mask, value of True corresponds to bad pixels
    """
    bad_flux = np.logical_or(~np.isfinite(fluxes), fluxes == 0)
    bad_err = (~np.isfinite(flux_errs)) | (flux_errs <= 0)
    bad_pix = bad_err | bad_flux
    return bad_pix
Example 19
Project: TheCannon Author: annayqho File: dataset.py License: MIT License | 6 votes |
def diagnostics_test_step_flagstars(self):
    """ Write files listing stars whose inferred labels lie outside
    2 standard deviations from the reference label space """
    label_names = self.get_plotting_labels()
    nlabels = len(label_names)
    reference_labels = self.tr_label
    test_labels = self.test_label_vals
    test_IDs = np.array(self.test_ID)
    mean = np.mean(reference_labels, 0)
    stdev = np.std(reference_labels, 0)
    lower = mean - 2 * stdev
    upper = mean + 2 * stdev
    for i in range(nlabels):
        label_name = label_names[i]
        test_vals = test_labels[:, i]
        warning = np.logical_or(test_vals < lower[i], test_vals > upper[i])
        filename = "flagged_stars_%s.txt" % i
        with open(filename, 'w') as output:
            for star in test_IDs[warning]:
                output.write('{0:s}\n'.format(star))
        print("Reference label %s" % label_name)
        print("flagged %s stars beyond 2-sig of ref labels" % sum(warning))
        print("Saved list %s" % filename)
Example 20
Project: TheCannon Author: annayqho File: aaomega_munge_data.py License: MIT License | 6 votes |
def make_full_ivar():
    """ take the scatters and skylines and make final ivars """
    # skylines come as an ivar
    # don't use them for now, because I don't really trust them...
    # skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0']

    ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
    ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
    test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
    test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']

    ref_ivar = np.ones(ref_flux.shape) / ref_scat[:, None]**2
    test_ivar = np.ones(test_flux.shape) / test_scat[:, None]**2

    # ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)
    # test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)

    ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
    test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
    SMALL = 1.0 / 1000000000.0
    ref_ivar[ref_bad] = SMALL
    test_ivar[test_bad] = SMALL
    np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar)
    np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar)
Example 21
Project: TheCannon Author: annayqho File: pull_data.py License: MIT License | 6 votes |
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])

    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids == val)[0][0] for val in ref_id_col])
    all_col, all_col_ivar = get_colors(
            DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:, inds]
    col_ivar = all_col_ivar[:, inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar == np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col == np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0

    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id == val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col
Example 22
Project: neuropythy Author: noahbenson File: util.py License: GNU Affero General Public License v3.0 | 5 votes |
def tetrahedral_barycentric_coordinates(tetra, pt):
    '''
    tetrahedral_barycentric_coordinates(tetrahedron, point) yields a list of weights for each
      vertex in the given tetrahedron in the same order as the vertices given. If all weights
      are 0, then the point is not inside the tetrahedron.
    '''
    # I found a description of this algorithm here (Nov. 2017):
    # http://steve.hollasch.net/cgindex/geometry/ptintet.html
    tetra = np.asarray(tetra)
    pt = np.asarray(pt)
    if tetra.shape[0] != 4:
        if tetra.shape[1] == 4:
            if tetra.shape[0] == 3:
                tetra = np.transpose(tetra, (1,0) if len(tetra.shape) == 2 else (1,0,2))
            else:
                tetra = np.transpose(tetra, (1,2,0))
        elif tetra.shape[1] == 3:
            tetra = np.transpose(tetra, (2,1,0))
        else:
            tetra = np.transpose(tetra, (2,0,1))
    elif tetra.shape[1] != 3:
        tetra = np.transpose(tetra, (0,2,1))
    if pt.shape[0] != 3:
        pt = pt.T
    # Okay, calculate the determinants...
    d_ = det_4x3(tetra[0], tetra[1], tetra[2], tetra[3])
    d0 = det_4x3(pt,       tetra[1], tetra[2], tetra[3])
    d1 = det_4x3(tetra[0], pt,       tetra[2], tetra[3])
    d2 = det_4x3(tetra[0], tetra[1], pt,       tetra[3])
    d3 = det_4x3(tetra[0], tetra[1], tetra[2], pt)
    s_ = np.sign(d_)
    z_ = np.logical_or(np.any([s_ * si == -1 for si in np.sign([d0,d1,d2,d3])], axis=0),
                       np.isclose(d_, 0))
    x_ = np.logical_not(z_)
    d_inv = x_ / (x_ * d_ + z_)
    return np.asarray([d_inv * dq for dq in (d0,d1,d2,d3)])
Example 23
Project: pyscf Author: pyscf File: kproxy.py License: Apache License 2.0 | 5 votes |
def proxy_response_ov_batch(self, k_row, k_col):
    """
    A raw response submatrix corresponding to specific k-points.

    Args:
        k_row (ndarray): sets of k-point pairs (row index);
        k_col (ndarray): sets of k-point pairs (column index);

    Returns:
        A raw response matrix.
    """
    masks_row = tuple(self.kov2ov(i) for i in k_row)
    masks_col = tuple(self.kov2ov(i) for i in k_col)
    full_mask_row = reduce(numpy.logical_or, masks_row)
    full_mask_col = reduce(numpy.logical_or, masks_col)
    big = kproxy_supercell.supercell_response_ov(
        self.proxy_vind,
        (full_mask_row, full_mask_col),
        self.nocc_full,
        self.nmo_full,
        self.proxy_is_double(),
        self.model_super.supercell_inv_rotation,
        self.model,
    )
    result = []
    for m_row, m_col in zip(masks_row, masks_col):
        m_row_red = m_row[full_mask_row]
        m_col_red = m_col[full_mask_col]
        result.append(tuple(i[m_row_red][:, m_col_red] for i in big))
    return tuple(result)

# This is needed for krhf_slow.get_block_k_ix
Example 24
Project: pyscf Author: pyscf File: m_xc_scalar_ni.py License: Apache License 2.0 | 5 votes |
def xc_scalar_ni(me, sp1, R1, sp2, R2, xc_code, deriv, **kw):
    from pyscf.dft.libxc import eval_xc
    """
    Computes overlap for an atom pair. The atom pair is given by a pair of species indices
    and the coordinates of the atoms.
    Args:
        sp1, sp2 : specie indices, and
        R1, R2 : respective coordinates in Bohr, atomic units
    Result:
        matrix of orbital overlaps
    The procedure uses the numerical integration in coordinate space.
    """
    grids = build_3dgrid(me, sp1, R1, sp2, R2, **kw)

    rho = dens_libnao(grids.coords, me.sv.nspin)
    THRS = 1e-12
    if me.sv.nspin == 1:
        rho[rho < THRS] = 0.0
    elif me.sv.nspin == 2:
        msk = np.logical_or(rho[:, 0] < THRS, rho[:, 1] < THRS)
        rho[msk, 0], rho[msk, 1] = 0.0, 0.0

    exc, vxc, fxc, kxc = libxc.eval_xc(xc_code, rho.T, spin=me.sv.nspin - 1, deriv=deriv)

    ao1 = ao_eval(me.ao1, R1, sp1, grids.coords)
    if deriv == 1:
        xq = vxc[0] if vxc[0].ndim > 1 else vxc[0].reshape((vxc[0].size, 1))
    elif deriv == 2:
        xq = fxc[0] if fxc[0].ndim > 1 else fxc[0].reshape((fxc[0].size, 1))
    else:
        print(' deriv ', deriv)
        raise RuntimeError('!deriv!')

    ao1 = np.einsum('ax,x,xq->qax', ao1, grids.weights, xq)
    ao2 = ao_eval(me.ao2, R2, sp2, grids.coords)

    return np.einsum('qax,bx->qab', ao1, ao2)
Example 25
Project: pyGSTi Author: pyGSTio File: objectivefns.py License: Apache License 2.0 | 5 votes |
def get_dweights(self, p, wts):  # derivative of weights w.r.t. p
    cp = _np.clip(p, self.minProbClipForWeighting, 1 - self.minProbClipForWeighting)
    dw = -0.5 * wts / cp  # nSpamLabels x nCircuits array (K x M)
    dw[_np.logical_or(p < self.minProbClipForWeighting,
                      p > (1 - self.minProbClipForWeighting))] = 0.0
    return dw
Example 26
Project: pyGSTi Author: pyGSTio File: objectivefns.py License: Apache License 2.0 | 5 votes |
def get_dweights(self, p, wts):  # derivative of weights w.r.t. p
    cp = _np.clip(p, self.minProbClipForWeighting, 1 - self.minProbClipForWeighting)
    dw = -0.5 * wts / cp  # nSpamLabels x nCircuits array (K x M)
    dw[_np.logical_or(p < self.minProbClipForWeighting,
                      p > (1 - self.minProbClipForWeighting))] = 0.0
    return dw

# Objective Function
Example 27
Project: tensorflow-XNN Author: ChenglongChen File: main.py License: MIT License | 5 votes |
def load_train_data():
    types_dict_train = {
        'train_id': 'int32',
        'item_condition_id': 'int32',
        'price': 'float32',
        'shipping': 'int8',
        'name': 'str',
        'brand_name': 'str',
        'item_desc': 'str',
        'category_name': 'str',
    }
    df = pd.read_csv('../input/train.tsv', delimiter='\t',
                     low_memory=True, dtype=types_dict_train)
    df.rename(columns={"train_id": "id"}, inplace=True)
    df.rename(columns={"item_description": "item_desc"}, inplace=True)
    if DROP_ZERO_PRICE:
        df = df[df.price > 0].copy()
    price = np.log1p(df.price.values)
    df.drop("price", axis=1, inplace=True)
    df["price"] = price
    df["is_train"] = 1
    df["missing_brand_name"] = df["brand_name"].isnull().astype(int)
    df["missing_category_name"] = df["category_name"].isnull().astype(int)
    # raw string avoids invalid-escape warnings for the regex
    missing_ind = np.logical_or(df["item_desc"].isnull(),
                                df["item_desc"].str.lower().str.contains(r"no\s+description\s+yet"))
    df["missing_item_desc"] = missing_ind.astype(int)
    df["item_desc"][missing_ind] = df["name"][missing_ind]
    gc.collect()
    if DEBUG:
        return df.head(DEBUG_SAMPLE_NUM)
    else:
        return df
Example 28
Project: recruit Author: Frank-qlu File: extras.py License: Apache License 2.0 | 5 votes |
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
    """
    Private function for the computation of covariance and correlation
    coefficients.
    """
    x = ma.array(x, ndmin=2, copy=True, dtype=float)
    xmask = ma.getmaskarray(x)
    # Quick exit if we can't process masked data
    if not allow_masked and xmask.any():
        raise ValueError("Cannot process masked data.")
    #
    if x.shape[0] == 1:
        rowvar = True
    # Make sure that rowvar is either 0 or 1
    rowvar = int(bool(rowvar))
    axis = 1 - rowvar
    if rowvar:
        tup = (slice(None), None)
    else:
        tup = (None, slice(None))
    #
    if y is None:
        xnotmask = np.logical_not(xmask).astype(int)
    else:
        y = array(y, copy=False, ndmin=2, dtype=float)
        ymask = ma.getmaskarray(y)
        if not allow_masked and ymask.any():
            raise ValueError("Cannot process masked data.")
        if xmask.any() or ymask.any():
            if y.shape == x.shape:
                # Define some common mask
                common_mask = np.logical_or(xmask, ymask)
                if common_mask is not nomask:
                    xmask = x._mask = y._mask = ymask = common_mask
                    x._sharedmask = False
                    y._sharedmask = False
        x = ma.concatenate((x, y), axis)
        xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
    x -= x.mean(axis=rowvar)[tup]
    return (x, xnotmask, rowvar)
Example 29
Project: recruit Author: Frank-qlu File: test_numeric.py License: Apache License 2.0 | 5 votes |
def test_logical_and_or_xor(self):
    assert_array_equal(self.t | self.t, self.t)
    assert_array_equal(self.f | self.f, self.f)
    assert_array_equal(self.t | self.f, self.t)
    assert_array_equal(self.f | self.t, self.t)
    np.logical_or(self.t, self.t, out=self.o)
    assert_array_equal(self.o, self.t)
    assert_array_equal(self.t & self.t, self.t)
    assert_array_equal(self.f & self.f, self.f)
    assert_array_equal(self.t & self.f, self.f)
    assert_array_equal(self.f & self.t, self.f)
    np.logical_and(self.t, self.t, out=self.o)
    assert_array_equal(self.o, self.t)
    assert_array_equal(self.t ^ self.t, self.f)
    assert_array_equal(self.f ^ self.f, self.f)
    assert_array_equal(self.t ^ self.f, self.t)
    assert_array_equal(self.f ^ self.t, self.t)
    np.logical_xor(self.t, self.t, out=self.o)
    assert_array_equal(self.o, self.f)

    assert_array_equal(self.nm & self.t, self.nm)
    assert_array_equal(self.im & self.f, False)
    assert_array_equal(self.nm & True, self.nm)
    assert_array_equal(self.im & False, self.f)
    assert_array_equal(self.nm | self.t, self.t)
    assert_array_equal(self.im | self.f, self.im)
    assert_array_equal(self.nm | True, self.t)
    assert_array_equal(self.im | False, self.im)
    assert_array_equal(self.nm ^ self.t, self.im)
    assert_array_equal(self.im ^ self.f, self.im)
    assert_array_equal(self.nm ^ True, self.im)
    assert_array_equal(self.im ^ False, self.im)
Example 30
Project: recruit Author: Frank-qlu File: stata.py License: Apache License 2.0 | 5 votes |
def _do_convert_missing(self, data, convert_missing):
    # Check for missing values, and replace if found
    for i, colname in enumerate(data):
        fmt = self.typlist[i]
        if fmt not in self.VALID_RANGE:
            continue

        nmin, nmax = self.VALID_RANGE[fmt]
        series = data[colname]
        missing = np.logical_or(series < nmin, series > nmax)
        if not missing.any():
            continue

        if convert_missing:  # Replacement follows Stata notation
            missing_loc = np.argwhere(missing._ndarray_values)
            umissing, umissing_loc = np.unique(series[missing],
                                               return_inverse=True)
            # The original used np.object, a deprecated alias for the builtin object.
            replacement = Series(series, dtype=object)
            for j, um in enumerate(umissing):
                missing_value = StataMissingValue(um)

                loc = missing_loc[umissing_loc == j]
                replacement.iloc[loc] = missing_value
        else:  # All replacements are identical
            dtype = series.dtype
            if dtype not in (np.float32, np.float64):
                dtype = np.float64
            replacement = Series(series, dtype=dtype)
            replacement[missing] = np.nan
        data[colname] = replacement