Python numpy.bool8() Examples
The following are 12 code examples showing how to use numpy.bool8(). These examples are extracted from open-source projects. You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example.
You may check out the related API usage on the sidebar.
You may also want to check out all available functions/classes of the module numpy, or try the search function.
Example 1
Project: ray Author: ray-project File: test_logger.py License: Apache License 2.0 | 6 votes |
def testTBX(self):
    """Smoke-test TBXLogger: log a config with nested dicts and numpy scalars,
    push a few results through, and close cleanly."""
    # The config deliberately mixes plain Python values with numpy scalar
    # types (int64, bool8) to exercise the logger's hparam serialization.
    nested = {"c": {"D": 123}}
    config = {
        "a": 2,
        "b": [1, 2],
        "c": nested,
        "d": np.int64(1),
        "e": np.bool8(True)
    }
    trial = Trial(evaluated_params=config, trial_id="tbx")
    tbx_logger = TBXLogger(config=config, logdir=self.test_dir, trial=trial)
    for res in (result(0, 4),
                result(1, 4),
                result(2, 4, score=[1, 2, 3], hello={"world": 1})):
        tbx_logger.on_result(res)
    tbx_logger.close()
Example 2
Project: lisflood-code Author: ec-jrc File: transmission.py License: European Union Public License 1.2 | 6 votes |
def initial(self):
    """Initial part of the transmission loss module.

    Loads the transmission-loss input maps and derives the mask and
    exponents used later; all results are stored on ``self.var``.
    """
    lis_settings = LisSettings.instance()
    run_options = lis_settings.options
    if run_options['TransLoss']:
        trans_area = loadmap('TransArea')
        # downstream area taken into account for transmission loss
        self.var.TransSub = loadmap('TransSub')
        # upstream area
        self.var.UpAreaTrans = loadmap('UpAreaTrans')
        # Boolean mask: transmission loss only applies where the (total)
        # upstream area reaches the threshold area.
        self.var.UpTrans = np.where(self.var.UpAreaTrans >= trans_area,
                                    np.bool8(1), np.bool8(0))
        # transmission loss exponent and its reciprocal
        self.var.TransPower1 = loadmap('TransPower1')
        self.var.TransPower2 = 1.0 / self.var.TransPower1
        maskinfo = MaskInfo.instance()
        # Cumulative transmission loss (starts at zero everywhere)
        self.var.TransCum = maskinfo.in_zero()
        # self.var.TransLossM3Dt = maskinfo.in_zero()  # substep amount of transmission loss
Example 3
Project: dask-image Author: dask File: _utils.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def _get_mask(image, mask):
    """Normalize ``mask`` for use with ``image``.

    ``None`` becomes the scalar ``True`` (everything selected). Array masks
    must match ``image.shape`` and are coerced to boolean; scalar masks are
    coerced to a plain ``bool``.

    :raises RuntimeError: if an array mask's shape differs from ``image``'s
    :raises TypeError: if ``mask`` is neither boolean-like nor an array
    """
    if mask is None:
        mask = True

    mask_type = _get_dtype(mask).type
    if isinstance(mask, (numpy.ndarray, dask.array.Array)):
        if mask.shape != image.shape:
            raise RuntimeError("`mask` must have the same shape as `image`.")
        # FIX: numpy.bool8 is a deprecated alias removed in NumPy 2.0;
        # numpy.bool_ is the canonical, identical scalar type.
        if not issubclass(mask_type, numpy.bool_):
            mask = (mask != 0)
    elif issubclass(mask_type, numpy.bool_):
        mask = bool(mask)
    else:
        raise TypeError("`mask` must be a Boolean or an array.")

    return mask
Example 4
Project: Computable Author: ktraunmueller File: test_excel.py License: MIT License | 5 votes |
def test_bool_types(self):
    """Boolean dtypes survive an Excel write/read round trip after re-casting."""
    _skip_if_no_xlrd()

    for np_type in (np.bool8, np.bool_):
        with ensure_clean(self.ext) as path:
            # Excel stores booleans numerically, so the read-back frame is
            # cast back to the original dtype before comparing.
            original = DataFrame([1, 0, True, False], dtype=np_type)
            original.to_excel(path, 'test1')
            roundtripped = ExcelFile(path).parse('test1').astype(np_type)
            tm.assert_frame_equal(original, roundtripped)
Example 5
Project: qkit Author: qkitgroup File: dll_support_nidaq.py License: GNU General Public License v2.0 | 5 votes |
def write_dig_port_stream(channel, samples, rate, timeout=1.0):
    '''
    Output a binary waveform.

    Input:
        channel - channels to output data to
        samples - a 2d numpy array of booleans, 2nd index is the channel
        rate    - sample rate in Hz
        timeout - NOTE(review): currently unused by this function

    Errors from the NI DAQ driver are logged, not raised.
    '''
    taskHandle = TaskHandle(0)
    try:
        CHK(nidaq.DAQmxCreateTask("", ctypes.byref(taskHandle)))
        CHK(nidaq.DAQmxCreateDOChan(taskHandle, channel, '',
                                    DAQmx_Val_ChanForAllLines))
        nwritten = int32(0)
        # FIX: numpy.bool8 is a deprecated alias removed in NumPy 2.0;
        # numpy.bool_ is the identical canonical scalar type.
        vals = numpy.array(samples, numpy.bool_)
        # A 1-D input is a single sample across all lines.
        if vals.ndim == 1:
            nSamples = 1
        else:
            nSamples = vals.shape[0]
        # DAQmxSetSampClkSrc(taskHandle, "")
        CHK(nidaq.DAQmxCfgSampClkTiming(taskHandle, "", float64(rate),
                                        DAQmx_Val_Rising,
                                        DAQmx_Val_FiniteSamps,
                                        uInt64(nSamples)))
        nbytes = int32(0)
        CHK(nidaq.DAQmxGetWriteDigitalLinesBytesPerChan(
            taskHandle, ctypes.byref(nbytes)))
        CHK(nidaq.DAQmxWriteDigitalLines(taskHandle, int32(nSamples),
                                         int32(1), float64(1.0),
                                         int32(DAQmx_Val_GroupByChannel),
                                         vals.ctypes.data,
                                         ctypes.byref(nwritten), None))
        CHK(nidaq.DAQmxStartTask(taskHandle))
    # FIX: 'except Exception, e' is Python-2-only syntax; 'as' form works
    # on Python 2.6+ and Python 3.
    except Exception as e:
        logging.error('NI DAQ call failed: %s', str(e))
Example 6
Project: deep-image-retrieval Author: almazan File: dataset.py License: BSD 3-Clause "New" or "Revised" License | 5 votes |
def eval_query_top(self, query_idx, scores, k=(1,5,10,20,50,100)):
    """Evaluates top-k retrieval accuracy for a given query.

    :param query_idx: index of the query whose ground-truth label is fetched
    :param scores: per-database-item similarity scores (higher = better)
    :param k: cutoffs to evaluate; cutoffs >= the database size are skipped
    :returns: dict mapping each valid cutoff k_ to 1.0 if any of the top-k_
        ranked items carries the query's label, else 0.0
    :raises NotImplementedError: if no labels are available
    """
    if not self.labels:
        raise NotImplementedError()
    q_label = self.get_query_groundtruth(query_idx, 'label')
    # FIX: np.bool8 is a deprecated alias removed in NumPy 2.0; build the
    # boolean relevance mask explicitly instead.
    correct = np.asarray([l == q_label for l in self.labels], dtype=bool)
    # Reorder by descending score so correct[:k_] is the top-k_ ranking.
    correct = correct[(-scores).argsort()]
    return {k_: float(correct[:k_].any()) for k_ in k if k_ < len(correct)}
Example 7
Project: dask-image Author: dask File: _utils.py License: BSD 3-Clause "New" or "Revised" License | 5 votes |
def _get_structure(image, structure): # Create square connectivity as default if structure is None: structure = scipy.ndimage.generate_binary_structure(image.ndim, 1) elif isinstance(structure, (numpy.ndarray, dask.array.Array)): if structure.ndim != image.ndim: raise RuntimeError( "`structure` must have the same rank as `image`." ) if not issubclass(structure.dtype.type, numpy.bool8): structure = (structure != 0) else: raise TypeError("`structure` must be an array.") return structure
Example 8
Project: elasticintel Author: securityclippy File: test_excel.py License: GNU General Public License v3.0 | 5 votes |
def test_bool_types(self):
    """Round-trip boolean-dtype frames through Excel and compare after casting."""
    _skip_if_no_xlrd()

    for np_type in (np.bool8, np.bool_):
        with ensure_clean(self.ext) as path:
            # Excel represents booleans as numbers, so re-cast the frame
            # read back before asserting equality.
            expected = DataFrame([1, 0, True, False], dtype=np_type)
            expected.to_excel(path, 'test1')
            actual = read_excel(ExcelFile(path), 'test1').astype(np_type)
            tm.assert_frame_equal(expected, actual)
Example 9
Project: vnpy_crypto Author: birforce File: multicomp.py License: MIT License | 4 votes |
def tukeyhsd(self, alpha=0.05):
    """Tukey's range test to compare means of all pairs of groups

    Parameters
    ----------
    alpha : float, optional
        Value of FWER at which to calculate HSD.

    Returns
    -------
    results : TukeyHSDResults instance
        A results class containing relevant data and some post-hoc
        calculations
    """
    # Group statistics on the raw values (useranks=False): per-group means
    # and observation counts.
    self.groupstats = GroupsStats(
        np.column_stack([self.data, self.groupintlab]),
        useranks=False)
    gmeans = self.groupstats.groupmean
    gnobs = self.groupstats.groupnobs
    #var_ = self.groupstats.groupvarwithin()
    #possibly an error in varcorrection in this case
    # Pooled within-group variance; ddof equals the number of groups.
    var_ = np.var(self.groupstats.groupdemean(), ddof=len(gmeans))
    #res contains: 0:(idx1, idx2), 1:reject, 2:meandiffs, 3: std_pairs, 4:confint, 5:q_crit,
    #6:df_total, 7:reject2
    # NOTE(review): this calls a module-level tukeyhsd() helper that this
    # method shadows by name — confirm against the rest of the module.
    res = tukeyhsd(gmeans, gnobs, var_, df=None, alpha=alpha, q_crit=None)

    # Structured record array of pairwise results, rounded to 4 decimals.
    # NOTE(review): np.bool8 is a deprecated alias removed in NumPy 2.0
    # (np.bool_ is the canonical name).
    resarr = np.array(lzip(self.groupsunique[res[0][0]],
                           self.groupsunique[res[0][1]],
                           np.round(res[2],4),
                           np.round(res[4][:, 0],4),
                           np.round(res[4][:, 1],4),
                           res[1]),
                      dtype=[('group1', object),
                             ('group2', object),
                             ('meandiff',float),
                             ('lower',float),
                             ('upper',float),
                             ('reject', np.bool8)])
    results_table = SimpleTable(resarr, headers=resarr.dtype.names)
    # NOTE(review): concatenation yields "...Tukey HSD,FWER=..." with no
    # space after the comma — probably a missing space; kept byte-identical.
    results_table.title = 'Multiple Comparison of Means - Tukey HSD,' + \
        'FWER=%4.2f' % alpha
    return TukeyHSDResults(self, results_table, res[5], res[1], res[2],
                           res[3], res[4], res[6], res[7], var_)
Example 10
Project: allantools Author: aewallin File: allantools.py License: GNU Lesser General Public License v3.0 | 4 votes |
def tau_reduction(ms, rate, n_per_decade):
    """Reduce the number of taus to maximum of n per decade (Helper function)

    takes in a tau list and reduces the number of taus to a maximum amount
    per decade. This is only useful if more than the "decade" and "octave"
    but less than the "all" taus are wanted. E.g. to show certain features
    of the data one might want 100 points per decade.

    NOTE: The algorithm is slightly inaccurate for ms under n_per_decade,
    and will also remove some points in this range, which is usually fine.

    Typical use would be something like:
    (data,m,taus)=tau_generator(data,rate,taus="all")
    (m,taus)=tau_reduction(m,rate,n_per_decade)

    Parameters
    ----------
    ms: array of integers
        List of m values (assumed to be an "all" list) to remove points from.
    rate: float
        Sample rate of data in Hz. Time interval between measurements
        is 1/rate seconds. Used to convert to taus.
    n_per_decade: int
        Number of ms/taus to keep per decade.

    Returns
    -------
    m: np.array
        Reduced list of m values
    taus: np.array
        Reduced list of tau values
    """
    ms = np.int64(ms)
    # Keep ms[i] only when its rounded per-decade bin differs from its
    # successor's bin.
    # FIX: np.bool8 is a deprecated alias removed in NumPy 2.0; casting the
    # difference with astype(bool) gives identical truth values.
    keep = (np.rint(n_per_decade * np.log10(ms[1:])) -
            np.rint(n_per_decade * np.log10(ms[:-1]))).astype(bool)
    # Adjust ms size to fit above-defined mask
    ms = ms[:-1]
    assert len(ms) == len(keep)
    ms = ms[keep]

    taus = ms / float(rate)

    return ms, taus
Example 11
Project: Splunking-Crime Author: nccgroup File: multicomp.py License: GNU Affero General Public License v3.0 | 4 votes |
def tukeyhsd(self, alpha=0.05):
    """Tukey's range test to compare means of all pairs of groups

    Parameters
    ----------
    alpha : float, optional
        Value of FWER at which to calculate HSD.

    Returns
    -------
    results : TukeyHSDResults instance
        A results class containing relevant data and some post-hoc
        calculations
    """
    # Per-group means and counts computed on raw values (useranks=False).
    self.groupstats = GroupsStats(
        np.column_stack([self.data, self.groupintlab]),
        useranks=False)
    gmeans = self.groupstats.groupmean
    gnobs = self.groupstats.groupnobs
    #var_ = self.groupstats.groupvarwithin()
    #possibly an error in varcorrection in this case
    # Pooled within-group variance with ddof = number of groups.
    var_ = np.var(self.groupstats.groupdemean(), ddof=len(gmeans))
    #res contains: 0:(idx1, idx2), 1:reject, 2:meandiffs, 3: std_pairs, 4:confint, 5:q_crit,
    #6:df_total, 7:reject2
    # NOTE(review): calls the module-level tukeyhsd() helper shadowed by
    # this method's name — verify against the enclosing module.
    res = tukeyhsd(gmeans, gnobs, var_, df=None, alpha=alpha, q_crit=None)

    # Assemble a structured array of the pairwise comparisons (4-decimal
    # rounding). NOTE(review): np.bool8 is a deprecated alias removed in
    # NumPy 2.0 (use np.bool_).
    resarr = np.array(lzip(self.groupsunique[res[0][0]],
                           self.groupsunique[res[0][1]],
                           np.round(res[2],4),
                           np.round(res[4][:, 0],4),
                           np.round(res[4][:, 1],4),
                           res[1]),
                      dtype=[('group1', object),
                             ('group2', object),
                             ('meandiff',float),
                             ('lower',float),
                             ('upper',float),
                             ('reject', np.bool8)])
    results_table = SimpleTable(resarr, headers=resarr.dtype.names)
    # NOTE(review): string concatenation produces "...Tukey HSD,FWER=..."
    # without a space — likely a missing space; kept byte-identical here.
    results_table.title = 'Multiple Comparison of Means - Tukey HSD,' + \
        'FWER=%4.2f' % alpha
    return TukeyHSDResults(self, results_table, res[5], res[1], res[2],
                           res[3], res[4], res[6], res[7], var_)
Example 12
Project: eo-learn Author: sentinel-hub File: eodata.py License: MIT License | 4 votes |
def _parse_feature_value(self, value):
    """Checks if value fits the feature type. If not it tries to fix it or
    raise an error.

    :param value: candidate payload for this feature
    :return: the (possibly coerced) value
    :raises: ValueError
    """
    # FeatureIO wrappers are accepted as-is (lazy-loaded content).
    if isinstance(value, FeatureIO):
        return value
    if not hasattr(self, 'ndim'):  # Because of serialization/deserialization during multiprocessing
        return value

    # Raster-like feature types: enforce numpy array with the expected rank.
    if self.ndim:
        if not isinstance(value, np.ndarray):
            raise ValueError('{} feature has to be a numpy array'.format(self.feature_type))
        if value.ndim != self.ndim:
            raise ValueError('Numpy array of {} feature has to have {} '
                            'dimension{}'.format(self.feature_type, self.ndim, 's' if self.ndim > 1 else ''))

        if self.feature_type.is_discrete():
            # NOTE(review): np.bool and np.bool8 are deprecated aliases
            # removed in modern NumPy — this check breaks on NumPy >= 1.24.
            if not issubclass(value.dtype.type, (np.integer, np.bool, np.bool_, np.bool8)):
                # Warn (rather than raise) for non-integer discrete payloads.
                msg = '{} is a discrete feature type therefore dtype of data should be a subtype of ' \
                      'numpy.integer or numpy.bool, found type {}. In the future an error will be raised because' \
                      'of this'.format(self.feature_type, value.dtype.type)
                warnings.warn(msg, DeprecationWarning, stacklevel=3)

                # raise ValueError('{} is a discrete feature type therefore dtype of data has to be a subtype of '
                #                  'numpy.integer or numpy.bool, found type {}'.format(self.feature_type,
                #                                                                      value.dtype.type))
        # This checking is disabled for now
        # else:
        #     if not issubclass(value.dtype.type, (np.floating, np.float)):
        #         raise ValueError('{} is a floating feature type therefore dtype of data has to be a subtype of '
        #                          'numpy.floating or numpy.float, found type {}'.format(self.feature_type,
        #                                                                                value.dtype.type))
        return value

    # Vector feature types: accept GeoSeries (promoted to GeoDataFrame)
    # or GeoDataFrame; VECTOR features must carry a 'TIMESTAMP' column.
    if self.is_vector:
        if isinstance(value, gpd.GeoSeries):
            value = gpd.GeoDataFrame(dict(geometry=value), crs=value.crs)

        if isinstance(value, gpd.GeoDataFrame):
            if self.feature_type is FeatureType.VECTOR:
                if FeatureType.TIMESTAMP.value.upper() not in value:
                    raise ValueError("{} feature has to contain a column 'TIMESTAMP' with "
                                     "timestamps".format(self.feature_type))
            return value

        # NOTE(review): the two adjacent literals concatenate to
        # "...is not supportedgiven" — text was likely lost between them
        # (scraped source); kept byte-identical here.
        raise ValueError('{} feature works with data of type {}, parsing data type {} is not supported'
                         'given'.format(self.feature_type, gpd.GeoDataFrame.__name__, type(value)))

    return value