Python numpy.NaN Examples
The following are 30 code examples showing how to use numpy.NaN. Note that numpy.NaN is a float constant (an alias of numpy.nan), not a callable function, so it appears in assignments, fills, and comparisons via numpy.isnan rather than as a function call. The examples are extracted from open source projects; the project, author, source file, and license are listed above each example. You may also want to check out the other available functions and classes of the numpy module.
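As a quick orientation before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what np.NaN is and how it behaves:

import numpy as np

# np.NaN is just an alias for the float constant np.nan; it is not a function.
# (The NaN alias was removed in NumPy 2.0, where only np.nan remains.)
a = np.array([1.0, np.NaN, 3.0])
print(a[1] == np.NaN)   # False: NaN never compares equal, not even to itself
print(np.isnan(a))      # [False  True False]: use isnan() to detect missing values
print(np.nansum(a))     # 4.0: nan-aware reductions skip missing entries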
Example 1
Project: DOTA_models Author: ringringyi File: swiftshader_renderer.py License: Apache License 2.0

def render(self, take_screenshot=False, output_type=0):
    # self.render_timer.tic()
    self._actual_render()
    # self.render_timer.toc(log_at=1000, log_str='render timer', type='time')
    np_rgb_img = None
    np_d_img = None
    c = 1000.
    if take_screenshot:
        if self.modality == 'rgb':
            screenshot_rgba = np.zeros((self.height, self.width, 4), dtype=np.uint8)
            glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE,
                         screenshot_rgba)
            np_rgb_img = screenshot_rgba[::-1, :, :3]

        if self.modality == 'depth':
            screenshot_d = np.zeros((self.height, self.width, 4), dtype=np.uint8)
            glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE,
                         screenshot_d)
            np_d_img = screenshot_d[::-1, :, :3]
            np_d_img = (np_d_img[:, :, 2] * (255. * 255. / c) +
                        np_d_img[:, :, 1] * (255. / c) +
                        np_d_img[:, :, 0] * (1. / c))
            np_d_img = np_d_img.astype(np.float32)
            np_d_img[np_d_img == 0] = np.NaN
            np_d_img = np_d_img[:, :, np.newaxis]

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    return np_rgb_img, np_d_img
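The NaN-specific step above is the sentinel replacement: decoded depth values of 0 mean "no reading" and are marked missing with np.NaN. The idiom in isolation, with fabricated values:

import numpy as np

depth = np.array([[0., 1250., 0.],
                  [980., 0., 2010.]], dtype=np.float32)
depth[depth == 0] = np.NaN        # zero is the renderer's "no reading" sentinel
valid = ~np.isnan(depth)          # boolean mask of usable pixels
print(np.nanmean(depth))          # 1413.33...: statistics that ignore the holes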
Example 2
Project: pysat Author: pysat File: test_meta.py License: BSD 3-Clause "New" or "Revised" License

def test_basic_equality(self):
    self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
    self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
                         'description': 'boohoo', 'fill': np.NaN}

    # ensure things are the same
    meta2 = self.meta.copy()
    assert (meta2 == self.meta)

    # different way to create meta object
    meta3 = pysat.Meta()
    meta3['new1'] = self.meta['new1']
    meta3['new2'] = self.meta['new2']
    assert (meta3 == self.meta)

    # make sure differences matter
    self.meta['new2'] = {'fill': 1}
    assert not (meta2 == self.meta)
Example 3
Project: arctic Author: man-group File: numpy_records.py License: GNU Lesser General Public License v2.1

def _to_primitive(arr, string_max_len=None, forced_dtype=None):
    if arr.dtype.hasobject:
        if len(arr) > 0 and isinstance(arr[0], Timestamp):
            return np.array([t.value for t in arr], dtype=DTN64_DTYPE)

        if forced_dtype is not None:
            casted_arr = arr.astype(dtype=forced_dtype, copy=False)
        elif string_max_len is not None:
            casted_arr = np.array(arr.astype('U{:d}'.format(string_max_len)))
        else:
            casted_arr = np.array(list(arr))

        # Pick any unwanted data conversions (e.g. np.NaN to 'nan')
        if np.array_equal(arr, casted_arr):
            return casted_arr
    return arr
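The comment points at a real pitfall: casting an object array through a string dtype silently turns np.NaN into the literal text 'nan', which is why the function round-trips through np.array_equal before trusting the cast. A sketch of the failure mode it guards against:

import numpy as np

arr = np.array([1.5, np.NaN, 'x'], dtype=object)
casted = arr.astype('U10')            # the string cast renders np.NaN as the text 'nan'
print(casted)                         # ['1.5' 'nan' 'x']
print(np.array_equal(arr, casted))    # False, so the cast would be rejected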
Example 4
Project: recruit Author: Frank-qlu File: test_decimal.py License: Apache License 2.0

def assert_series_equal(self, left, right, *args, **kwargs):
    def convert(x):
        # need to convert array([Decimal(NaN)], dtype='object') to np.NaN
        # because Series[object].isnan doesn't recognize decimal(NaN) as NA.
        try:
            return math.isnan(x)
        except TypeError:
            return False

    if left.dtype == 'object':
        left_na = left.apply(convert)
    else:
        left_na = left.isna()
    if right.dtype == 'object':
        right_na = right.apply(convert)
    else:
        right_na = right.isna()

    tm.assert_series_equal(left_na, right_na)
    return tm.assert_series_equal(left[~left_na], right[~right_na],
                                  *args, **kwargs)
Example 5
Project: recruit Author: Frank-qlu File: test_window.py License: Apache License 2.0

def test_rolling_skew_edge_cases(self):
    all_nan = Series([np.NaN] * 5)

    # yields all NaN (0 variance)
    d = Series([1] * 5)
    x = d.rolling(window=5).skew()
    tm.assert_series_equal(all_nan, x)

    # yields all NaN (window too small)
    d = Series(np.random.randn(5))
    x = d.rolling(window=2).skew()
    tm.assert_series_equal(all_nan, x)

    # yields [NaN, NaN, NaN, 0.177994, 1.548824]
    d = Series([-1.50837035, -0.1297039, 0.19501095,
                1.73508164, 0.41941401])
    expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
    x = d.rolling(window=4).skew()
    tm.assert_series_equal(expected, x)
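The all-NaN expectations are not arbitrary: rolling statistics emit np.NaN wherever the window is not yet full, and skew is additionally undefined for zero-variance windows. A minimal illustration of the window warm-up behavior:

import pandas as pd

s = pd.Series([1., 2., 3., 4., 5.])
print(s.rolling(window=3).mean())
# 0    NaN
# 1    NaN
# 2    2.0
# 3    3.0
# 4    4.0
# dtype: float64   (the first complete window ends at position 2)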
Example 6
Project: recruit Author: Frank-qlu File: test_series.py License: Apache License 2.0

def test_dense_to_sparse(self):
    series = self.bseries.to_dense()
    bseries = series.to_sparse(kind='block')
    iseries = series.to_sparse(kind='integer')
    tm.assert_sp_series_equal(bseries, self.bseries)
    tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)
    assert iseries.name == self.bseries.name

    assert len(series) == len(bseries)
    assert len(series) == len(iseries)
    assert series.shape == bseries.shape
    assert series.shape == iseries.shape

    # non-NaN fill value
    series = self.zbseries.to_dense()
    zbseries = series.to_sparse(kind='block', fill_value=0)
    ziseries = series.to_sparse(kind='integer', fill_value=0)
    tm.assert_sp_series_equal(zbseries, self.zbseries)
    tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)
    assert ziseries.name == self.zbseries.name

    assert len(series) == len(zbseries)
    assert len(series) == len(ziseries)
    assert series.shape == zbseries.shape
    assert series.shape == ziseries.shape
Example 7
Project: recruit Author: Frank-qlu File: test_frame.py License: Apache License 2.0

def test_constructor_from_series(self):
    # GH 2873
    x = Series(np.random.randn(10000), name='a')
    x = x.to_sparse(fill_value=0)
    assert isinstance(x, SparseSeries)

    df = SparseDataFrame(x)
    assert isinstance(df, SparseDataFrame)

    x = Series(np.random.randn(10000), name='a')
    y = Series(np.random.randn(10000), name='b')
    x2 = x.astype(float)
    x2.loc[:9998] = np.NaN
    # TODO: x_sparse is unused...fix
    x_sparse = x2.to_sparse(fill_value=np.NaN)  # noqa

    # Currently fails too with weird ufunc error
    # df1 = SparseDataFrame([x_sparse, y])

    y.loc[:9998] = 0
    # TODO: y_sparse is unused...fix
    y_sparse = y.to_sparse(fill_value=0)  # noqa

    # without sparse value raises error
    # df2 = SparseDataFrame([x2_sparse, y])
Example 8
Project: recruit Author: Frank-qlu File: test_astype.py License: Apache License 2.0

def test_astype(self):
    # GH 13149, GH 13209
    idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])

    result = idx.astype(object)
    expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
    tm.assert_index_equal(result, expected)

    result = idx.astype(int)
    expected = Int64Index([1463356800000000000] +
                          [-9223372036854775808] * 3, dtype=np.int64)
    tm.assert_index_equal(result, expected)

    rng = date_range('1/1/2000', periods=10)
    result = rng.astype('i8')
    tm.assert_index_equal(result, Index(rng.asi8))
    tm.assert_numpy_array_equal(result.values, rng.asi8)
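The mixed ['2016-05-16', 'NaT', NaT, np.NaN] constructor works because pandas coerces np.NaN to NaT in datetime-like indexes, so all three spellings of "missing" collapse to the same value:

import numpy as np
import pandas as pd

idx = pd.DatetimeIndex(['2016-05-16', 'NaT', pd.NaT, np.NaN])
print(idx.isna())    # [False  True  True  True]: np.NaN became NaT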
Example 9
Project: recruit Author: Frank-qlu File: test_astype.py License: Apache License 2.0

def test_astype_conversion(self):
    # GH#13149, GH#13209
    idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')

    result = idx.astype(object)
    expected = Index([Period('2016-05-16', freq='D')] +
                     [Period(NaT, freq='D')] * 3, dtype='object')
    tm.assert_index_equal(result, expected)

    result = idx.astype(np.int64)
    expected = Int64Index([16937] + [-9223372036854775808] * 3,
                          dtype=np.int64)
    tm.assert_index_equal(result, expected)

    result = idx.astype(str)
    expected = Index(str(x) for x in idx)
    tm.assert_index_equal(result, expected)

    idx = period_range('1990', '2009', freq='A')
    result = idx.astype('i8')
    tm.assert_index_equal(result, Index(idx.asi8))
    tm.assert_numpy_array_equal(result.values, idx.asi8)
Example 10
Project: recruit Author: Frank-qlu File: test_analytics.py License: Apache License 2.0

def test_count(self, datetime_series):
    assert datetime_series.count() == len(datetime_series)

    datetime_series[::2] = np.NaN

    assert datetime_series.count() == np.isfinite(datetime_series).sum()

    mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
    ts = Series(np.arange(len(mi)), index=mi)

    left = ts.count(level=1)
    right = Series([2, 3, 1], index=[1, 2, nan])
    assert_series_equal(left, right)

    ts.iloc[[0, 3, 5]] = nan
    assert_series_equal(ts.count(level=1), right - 1)
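The core contract under test, in isolation: len() counts every position, while count() skips np.NaN.

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.NaN, 3.0, np.NaN])
print(len(s))      # 4: length includes missing entries
print(s.count())   # 2: count() excludes NaN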
Example 11
Project: recruit Author: Frank-qlu File: test_asof.py License: Apache License 2.0

def test_scalar(self):
    N = 30
    rng = date_range('1/1/1990', periods=N, freq='53s')
    ts = Series(np.arange(N), index=rng)
    ts[5:10] = np.NaN
    ts[15:20] = np.NaN

    val1 = ts.asof(ts.index[7])
    val2 = ts.asof(ts.index[19])

    assert val1 == ts[4]
    assert val2 == ts[14]

    # accepts strings
    val1 = ts.asof(str(ts.index[7]))
    assert val1 == ts[4]

    # in there
    result = ts.asof(ts.index[3])
    assert result == ts[3]

    # no as of value
    d = ts.index[0] - offsets.BDay()
    assert np.isnan(ts.asof(d))
Example 12
Project: recruit Author: Frank-qlu File: test_constructors.py License: Apache License 2.0

def test_fromValue(self, datetime_series):
    nans = Series(np.NaN, index=datetime_series.index)
    assert nans.dtype == np.float_
    assert len(nans) == len(datetime_series)

    strings = Series('foo', index=datetime_series.index)
    assert strings.dtype == np.object_
    assert len(strings) == len(datetime_series)

    d = datetime.now()
    dates = Series(d, index=datetime_series.index)
    assert dates.dtype == 'M8[ns]'
    assert len(dates) == len(datetime_series)

    # GH12336
    # Test construction of categorical series from value
    categorical = Series(0, index=datetime_series.index, dtype="category")
    expected = Series(0, index=datetime_series.index).astype("category")
    assert categorical.dtype == 'category'
    assert len(categorical) == len(datetime_series)
    tm.assert_series_equal(categorical, expected)
Example 13
Project: recruit Author: Frank-qlu File: test_reshape.py License: Apache License 2.0

def test_unstack_to_series(self):
    # check reversibility
    data = self.frame.unstack()

    assert isinstance(data, Series)
    undo = data.unstack().T
    assert_frame_equal(undo, self.frame)

    # check NA handling
    data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
    data.index = Index(['a', 'b', 'c'])
    result = data.unstack()

    midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
                      codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
    expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)

    assert_series_equal(result, expected)

    # check composability of unstack
    old_data = data.copy()
    for _ in range(4):
        data = data.unstack()
    assert_frame_equal(old_data, data)
Example 14
Project: recruit Author: Frank-qlu File: test_reshape.py License: Apache License 2.0

def test_unstack_fill_frame_object():
    # GH12815 Test unstacking with object.
    data = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
    data.index = pd.MultiIndex.from_tuples(
        [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])

    # By default missing values will be NaN
    result = data.unstack()
    expected = pd.DataFrame(
        {'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
        index=list('xyz')
    )
    assert_frame_equal(result, expected)

    # Fill with any value replaces missing values as expected
    result = data.unstack(fill_value='d')
    expected = pd.DataFrame(
        {'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
        index=list('xyz')
    )
    assert_frame_equal(result, expected)
Example 15
Project: recruit Author: Frank-qlu File: test_missing.py License: Apache License 2.0

def test_isna_lists(self):
    result = isna([[False]])
    exp = np.array([[False]])
    tm.assert_numpy_array_equal(result, exp)

    result = isna([[1], [2]])
    exp = np.array([[False], [False]])
    tm.assert_numpy_array_equal(result, exp)

    # list of strings / unicode
    result = isna(['foo', 'bar'])
    exp = np.array([False, False])
    tm.assert_numpy_array_equal(result, exp)

    result = isna([u('foo'), u('bar')])
    exp = np.array([False, False])
    tm.assert_numpy_array_equal(result, exp)

    # GH20675
    result = isna([np.NaN, 'world'])
    exp = np.array([True, False])
    tm.assert_numpy_array_equal(result, exp)
Example 16
Project: RF-Monitor Author: EarToEarOak File: gui.py License: GNU General Public License v2.0

def __clear_levels(self):
    self._levels.fill(numpy.NaN)
    self.__set_spectrum()
Example 17
Project: NiBetaSeries Author: HBClab File: test_nilearn.py License: MIT License

def test_atlas_connectivity(betaseries_file, atlas_file, atlas_lut):
    # read in test files
    bs_data = nib.load(str(betaseries_file)).get_data()
    atlas_lut_df = pd.read_csv(str(atlas_lut), sep='\t')

    # expected output
    pcorr = np.corrcoef(bs_data.squeeze())
    np.fill_diagonal(pcorr, np.NaN)
    regions = atlas_lut_df['regions'].values
    pcorr_df = pd.DataFrame(pcorr, index=regions, columns=regions)
    expected_zcorr_df = pcorr_df.apply(
        lambda x: (np.log(1 + x) - np.log(1 - x)) * 0.5)

    # run instance of AtlasConnectivity
    ac = AtlasConnectivity(timeseries_file=str(betaseries_file),
                           atlas_file=str(atlas_file),
                           atlas_lut=str(atlas_lut))

    res = ac.run()

    output_zcorr_df = pd.read_csv(res.outputs.correlation_matrix,
                                  na_values='n/a',
                                  delimiter='\t',
                                  index_col=0)

    os.remove(res.outputs.correlation_matrix)

    # test equality of the matrices up to 3 decimals
    pd.testing.assert_frame_equal(output_zcorr_df, expected_zcorr_df,
                                  check_less_precise=3)
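Filling the diagonal with np.NaN before the Fisher z-transform is essential: self-correlations are exactly 1, and 0.5 * (log(1 + r) - log(1 - r)) (equivalently arctanh(r)) diverges there. A sketch of the same pipeline with fabricated data:

import numpy as np

rng = np.random.default_rng(0)
ts = rng.standard_normal((4, 100))          # 4 regions x 100 timepoints, made up
r = np.corrcoef(ts)
np.fill_diagonal(r, np.NaN)                 # r = 1 on the diagonal would map to +inf
z = 0.5 * (np.log(1 + r) - np.log(1 - r))   # Fisher z-transform
print(np.isnan(np.diag(z)).all())           # True: the diagonal stays missing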
Example 18
Project: DOTA_models Author: ringringyi File: map_utils.py License: Apache License 2.0

def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
                       interpolation=cv2.INTER_LINEAR):
    fss = []
    valids = []

    center = (map_size - 1.0) / 2.0
    dst_theta = np.pi / 2.0
    dst_loc = np.array([center, center])
    dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
    dst_y_axis = np.array([np.cos(dst_theta + np.pi / 2),
                           np.sin(dst_theta + np.pi / 2)])

    def compute_points(center, x_axis, y_axis):
        points = np.zeros((3, 2), dtype=np.float32)
        points[0, :] = center
        points[1, :] = center + x_axis
        points[2, :] = center + y_axis
        return points

    dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
    for i in range(src_locs.shape[0]):
        src_loc = src_locs[i, :]
        src_x_axis = src_x_axiss[i, :]
        src_y_axis = src_y_axiss[i, :]
        src_points = compute_points(src_loc, src_x_axis, src_y_axis)
        M = cv2.getAffineTransform(src_points, dst_points)
        fs = cv2.warpAffine(map, M, (map_size, map_size), None,
                            flags=interpolation, borderValue=np.NaN)
        valid = np.invert(np.isnan(fs))
        valids.append(valid)
        fss.append(fs)
    return fss, valids
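The borderValue=np.NaN trick makes validity bookkeeping automatic: any destination pixel the affine warp cannot fill from the source comes back as NaN, and np.isnan recovers the mask. A small sketch under the same assumption of a float input image, with a simple shift standing in for the full transform:

import cv2
import numpy as np

img = np.ones((4, 4), dtype=np.float32)
M = np.float32([[1, 0, 2],                 # shift the image right by 2 pixels
                [0, 1, 0]])
warped = cv2.warpAffine(img, M, (4, 4), None,
                        flags=cv2.INTER_LINEAR, borderValue=np.NaN)
valid = ~np.isnan(warped)                  # False in the columns with no source coverage
print(valid)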
Example 19
Project: DOTA_models Author: ringringyi File: nav_env.py License: Apache License 2.0

def image_pre(images, modalities):
    # Assumes images are ...xHxWxC.
    # We always assume images are RGB followed by Depth.
    if 'depth' in modalities:
        d = images[..., -1][..., np.newaxis] * 1.
        d[d < 0.01] = np.NaN
        isnan = np.isnan(d)
        d = 100. / d
        d[isnan] = 0.
        images = np.concatenate((images[..., :-1], d, isnan),
                                axis=images.ndim - 1)
    if 'rgb' in modalities:
        images[..., :3] = images[..., :3] * 1. - 128
    return images
Example 20
Project: Deep_Learning_Weather_Forecasting Author: BruceBinBoxing File: make_dataset_missing_fill.py License: Apache License 2.0

def main(input_filepath, obs_file_name, ruitu_file_name, station_id,
         output_filepath):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    logger = logging.getLogger(__name__)
    logger.info('Missing value imputation from %s%s' % (input_filepath, obs_file_name))

    assert obs_file_name.split('.')[0].split('_')[-1] == \
        ruitu_file_name.split('.')[0].split('_')[-1], \
        "Error! Both file names must have the same phase info (train, val, test)"

    # Obs data imputation
    obs_df = load_pkl(input_filepath, obs_file_name)
    obs_df.set_index(['sta_id', 'time_index'], inplace=True)
    obs_df.replace(-9999., np.NaN, inplace=True)
    print('Simply replace -9999. with np.NaN!')
    obs_df.fillna(method='ffill', inplace=True)
    obs_df.fillna(method='bfill', inplace=True)

    # Ruitu data imputation
    logger.info('Missing value imputation from %s%s' % (input_filepath, ruitu_file_name))
    df_ruitu = load_pkl(input_filepath, ruitu_file_name)
    df_ruitu.set_index(['sta_id', 'time_index'], inplace=True)
    df_ruitu.replace(-9999., np.NaN, inplace=True)
    print('Simply replace -9999. with np.NaN!')
    df_ruitu.fillna(method='bfill', inplace=True)
    df_ruitu.fillna(method='ffill', inplace=True)

    # Make ndarray data for deep models
    logger.info('Transform imputed dataframes to ndarray ...')
    ndarray_dic = transform_from_df2ndarray(obs_df=obs_df, ruitu_df=df_ruitu,
                                            input_len=74, output_len=37,
                                            station_id=station_id,
                                            obs_input_only_target_vars=False)

    phase_flag = obs_file_name.split('.')[0].split('_')[-1]  # train, val, OR test
    assert phase_flag == 'train' or phase_flag == 'test' or phase_flag == 'val', \
        'phase can only be train, val, OR test; Please reproduce from scratch by running MakeFile script...!'

    save_pkl(ndarray_dic, output_filepath,
             '{}_sta_{}.ndarray'.format(phase_flag, station_id))
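The imputation recipe here is a common one: turn the dataset's -9999. sentinel into real missing values, then forward- and backward-fill. A minimal sketch with made-up columns (modern pandas deprecates fillna(method=...) in favor of ffill()/bfill(), used below, but the effect matches the code above):

import numpy as np
import pandas as pd

df = pd.DataFrame({'t2m': [14.2, -9999., -9999., 15.1],
                   'rh': [-9999., 55., 60., -9999.]})
df.replace(-9999., np.NaN, inplace=True)   # sentinel -> proper missing value
df = df.ffill().bfill()                    # carry neighboring values into the gaps
print(df)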
Example 21
Project: EarthSim Author: pyviz-topics File: io.py License: BSD 3-Clause "New" or "Revised" License

def open_gssha(filename):
    """
    Reads various filetypes produced by GSSHA
    """
    # Read metadata
    ftype = filename.split('.')[-1]
    if ftype in ['fgd', 'asc']:
        f = open(filename, 'r')
        c, r, xlc, ylc, gsize, nanval = [
            t(f.readline().split(' ')[-1].split('\n')[0])
            for t in [int, int, float, float, float, float]
        ]
        xs = np.linspace(xlc + gsize/2., xlc + c*gsize - gsize/2., c+1)
        ys = np.linspace(ylc + gsize/2., ylc + r*gsize - gsize/2., r)
    else:
        header_df = pd.read_table(filename, engine='python',
                                  names=['meta_key', 'meta_val'],
                                  sep=' ', nrows=6)
        bounds = header_df.loc[:3, 'meta_val'].values.astype(float)
        r, c = header_df.loc[4:6, 'meta_val'].values.astype(int)
        xs, ys = get_sampling(bounds, (r, c))

    # Read data using dask
    ddf = dd.read_csv(filename, skiprows=6, header=None, sep=' ')
    darr = ddf.values.compute()

    if ftype == 'fgd':
        darr[darr == nanval] = np.NaN

    return xr.DataArray(darr[::-1], coords={'x': xs, 'y': ys},
                        name='z', dims=['y', 'x'])
Example 22
Project: urbansprawl Author: lgervasoni File: dispersion.py License: MIT License

def closest_building_distance_median(point_ref, tree, df_closest_d, radius_search):
    """ Dispersion metric at point_ref
        Computes the median of the closest distance to another building
        for each building within a radius search
        Uses the input KDTree to accelerate calculations

    Parameters
    ----------
    point_ref : shapely.Point
        calculate index at input point
    tree : scipy.spatial.KDTree
        KDTree of buildings centroid
    df_closest_d : pandas.DataFrame
        data frame of buildings with closest distance calculation
    radius_search : float
        circle radius to consider the dispersion calculation at a local point

    Returns
    ----------
    float
        value of dispersion at input point
    """
    # Query buildings within radius search
    indices = tree.query_ball_point(point_ref, radius_search)
    # No dispersion value
    if len(indices) == 0:
        return np.NaN
    # Calculate median of closest distance values.
    # If no information is available, NaN is set
    return df_closest_d.loc[indices].median()
Example 23
Project: urbansprawl Author: lgervasoni File: dispersion.py License: MIT License

def closest_building_distance_average(point_ref, tree, df_closest_d, radius_search):
    """ Dispersion metric at point_ref
        Computes the mean of the closest distance to another building
        for each building within a radius search
        Uses the input KDTree to accelerate calculations

    Parameters
    ----------
    point_ref : shapely.Point
        calculate index at input point
    tree : scipy.spatial.KDTree
        KDTree of buildings centroid
    df_closest_d : pandas.DataFrame
        data frame of buildings with closest distance calculation
    radius_search : float
        circle radius to consider the dispersion calculation at a local point

    Returns
    ----------
    float
        value of dispersion at input point
    """
    # Query buildings within radius search
    indices = tree.query_ball_point(point_ref, radius_search)
    # No dispersion value
    if len(indices) == 0:
        return np.NaN
    # Calculate mean of closest distance values.
    # If no information is available, NaN is set
    return df_closest_d.loc[indices].mean()


##############################################################
### Dispersion indices calculation
##############################################################
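Both metrics use np.NaN as the "no buildings within the search radius" result, which later aggregations can skip with nan-aware reductions. A compact sketch with fabricated points, using a plain array in place of the buildings DataFrame:

import numpy as np
from scipy.spatial import cKDTree

pts = np.array([[0., 0.], [1., 0.], [50., 50.]])
tree = cKDTree(pts)
closest_d = np.array([1.0, 1.0, np.NaN])          # per-building distances, made up

idx = tree.query_ball_point([100., 100.], r=5.0)  # empty list: nothing nearby
value = np.NaN if len(idx) == 0 else np.nanmedian(closest_d[idx])
print(value)                                      # nan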
Example 24
Project: insightface Author: deepinsight File: triplet_image_iter.py License: MIT License

def pick_triplets(self, embeddings, nrof_images_per_class):
    emb_start_idx = 0
    triplets = []
    people_per_batch = len(nrof_images_per_class)
    #self.time_reset()
    pdists = self.pairwise_dists(embeddings)
    #self.times[3] += self.time_elapsed()
    for i in range(people_per_batch):
        nrof_images = int(nrof_images_per_class[i])
        for j in range(1, nrof_images):
            #self.time_reset()
            a_idx = emb_start_idx + j - 1
            #neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
            neg_dists_sqr = pdists[a_idx]
            #self.times[3] += self.time_elapsed()
            for pair in range(j, nrof_images):  # For every possible positive pair.
                p_idx = emb_start_idx + pair
                #self.time_reset()
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
                #self.times[4] += self.time_elapsed()
                #self.time_reset()
                neg_dists_sqr[emb_start_idx:emb_start_idx + nrof_images] = np.NaN
                if self.triplet_max_ap > 0.0:
                    if pos_dist_sqr > self.triplet_max_ap:
                        continue
                all_neg = np.where(np.logical_and(
                    neg_dists_sqr - pos_dist_sqr < self.triplet_alpha,
                    pos_dist_sqr < neg_dists_sqr))[0]  # FaceNet selection
                #self.times[5] += self.time_elapsed()
                #self.time_reset()
                #all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0]  # VGG Face selection
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs > 0:
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    triplets.append((a_idx, p_idx, n_idx))
        emb_start_idx += nrof_images
    np.random.shuffle(triplets)
    return triplets
Example 25
Project: insightface Author: deepinsight File: triplet_image_iter.py License: MIT License

def pick_triplets(self, embeddings, nrof_images_per_class):
    emb_start_idx = 0
    triplets = []
    people_per_batch = len(nrof_images_per_class)
    #self.time_reset()
    pdists = self.pairwise_dists(embeddings)
    #self.times[3] += self.time_elapsed()
    for i in xrange(people_per_batch):
        nrof_images = int(nrof_images_per_class[i])
        for j in xrange(1, nrof_images):
            #self.time_reset()
            a_idx = emb_start_idx + j - 1
            #neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
            neg_dists_sqr = pdists[a_idx]
            #self.times[3] += self.time_elapsed()
            for pair in xrange(j, nrof_images):  # For every possible positive pair.
                p_idx = emb_start_idx + pair
                #self.time_reset()
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
                #self.times[4] += self.time_elapsed()
                #self.time_reset()
                neg_dists_sqr[emb_start_idx:emb_start_idx + nrof_images] = np.NaN
                if self.triplet_max_ap > 0.0:
                    if pos_dist_sqr > self.triplet_max_ap:
                        continue
                all_neg = np.where(np.logical_and(
                    neg_dists_sqr - pos_dist_sqr < self.triplet_alpha,
                    pos_dist_sqr < neg_dists_sqr))[0]  # FaceNet selection
                #self.times[5] += self.time_elapsed()
                #self.time_reset()
                #all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0]  # VGG Face selection
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs > 0:
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    triplets.append((a_idx, p_idx, n_idx))
        emb_start_idx += nrof_images
    np.random.shuffle(triplets)
    return triplets
Example 26
Project: insightface Author: deepinsight File: data.py License: MIT License

def pick_triplets_impl(q_in, q_out):
    more = True
    while more:
        deq = q_in.get()
        if deq is None:
            more = False
        else:
            embeddings, emb_start_idx, nrof_images, alpha = deq
            print('running', emb_start_idx, nrof_images, os.getpid())
            for j in xrange(1, nrof_images):
                a_idx = emb_start_idx + j - 1
                neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
                for pair in xrange(j, nrof_images):  # For every possible positive pair.
                    p_idx = emb_start_idx + pair
                    pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
                    neg_dists_sqr[emb_start_idx:emb_start_idx + nrof_images] = np.NaN
                    all_neg = np.where(np.logical_and(
                        neg_dists_sqr - pos_dist_sqr < alpha,
                        pos_dist_sqr < neg_dists_sqr))[0]  # FaceNet selection
                    #all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0]  # VGG Face selection
                    nrof_random_negs = all_neg.shape[0]
                    if nrof_random_negs > 0:
                        rnd_idx = np.random.randint(nrof_random_negs)
                        n_idx = all_neg[rnd_idx]
                        #triplets.append((a_idx, p_idx, n_idx))
                        q_out.put((a_idx, p_idx, n_idx))
            #emb_start_idx += nrof_images
    print('exit', os.getpid())
Example 27
Project: insightface Author: deepinsight File: data.py License: MIT License

def pick_triplets(self, embeddings, nrof_images_per_class):
    emb_start_idx = 0
    triplets = []
    people_per_batch = len(nrof_images_per_class)
    #self.time_reset()
    pdists = self.pairwise_dists(embeddings)
    #self.times[3] += self.time_elapsed()
    for i in xrange(people_per_batch):
        nrof_images = int(nrof_images_per_class[i])
        for j in xrange(1, nrof_images):
            #self.time_reset()
            a_idx = emb_start_idx + j - 1
            #neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
            neg_dists_sqr = pdists[a_idx]
            #self.times[3] += self.time_elapsed()
            for pair in xrange(j, nrof_images):  # For every possible positive pair.
                p_idx = emb_start_idx + pair
                #self.time_reset()
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
                #self.times[4] += self.time_elapsed()
                #self.time_reset()
                neg_dists_sqr[emb_start_idx:emb_start_idx + nrof_images] = np.NaN
                if self.triplet_max_ap > 0.0:
                    if pos_dist_sqr > self.triplet_max_ap:
                        continue
                all_neg = np.where(np.logical_and(
                    neg_dists_sqr - pos_dist_sqr < self.triplet_alpha,
                    pos_dist_sqr < neg_dists_sqr))[0]  # FaceNet selection
                #self.times[5] += self.time_elapsed()
                #self.time_reset()
                #all_neg = np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0]  # VGG Face selection
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs > 0:
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    triplets.append((a_idx, p_idx, n_idx))
        emb_start_idx += nrof_images
    np.random.shuffle(triplets)
    return triplets
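The np.NaN assignment carries the logic in each of the four triplet-mining variants above: a comparison against NaN is always False, so distances to the anchor's own identity can never satisfy the np.where condition and are silently excluded as negative candidates. In isolation:

import numpy as np

neg_dists_sqr = np.array([0.9, 0.2, 1.4, 0.8])
neg_dists_sqr[0:2] = np.NaN        # indices 0-1 belong to the anchor's own identity
pos_dist_sqr, alpha = 0.5, 0.4

all_neg = np.where(np.logical_and(neg_dists_sqr - pos_dist_sqr < alpha,
                                  pos_dist_sqr < neg_dists_sqr))[0]
print(all_neg)                     # [3]: the NaN entries fail every comparison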
Example 28
Project: connecting_the_dots Author: autonomousvision File: plt2d.py License: MIT License

def depthshow(depth, *args, ax=None, **kwargs):
    if ax is None:
        ax = plt.gca()
    d = depth.copy()
    d[d < 0] = np.NaN
    ax.imshow(d, *args, **kwargs)
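This works because Matplotlib treats NaN pixels as "bad" values: imshow leaves them unpainted unless the colormap's set_bad color is set. A sketch, with random data standing in for a real depth map:

import matplotlib.pyplot as plt
import numpy as np

d = np.random.rand(8, 8)
d[d < 0.2] = np.NaN               # invalid entries become blank pixels
cmap = plt.cm.viridis.copy()
cmap.set_bad(color='black')       # optional: give NaN pixels an explicit color
plt.imshow(d, cmap=cmap)
plt.show()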
Example 29
Project: pysat Author: pysat File: _meta.py License: BSD 3-Clause "New" or "Revised" License

def _insert_default_values(self, input_name):
    default_str = ''
    default_nan = np.NaN
    labels = [self.units_label, self.name_label, self.notes_label,
              self.desc_label, self.plot_label, self.axis_label,
              self.scale_label, self.min_label, self.max_label,
              self.fill_label]
    defaults = [default_str, input_name, default_str, default_str,
                input_name, input_name, 'linear', default_nan,
                default_nan, default_nan]
    self._data.loc[input_name, labels] = defaults
Example 30
Project: pysat Author: pysat File: _meta.py License: BSD 3-Clause "New" or "Revised" License

def min_label(self, new_label):
    self._label_setter(new_label, self._min_label, 'min_label', np.NaN)