Python numpy.nan Examples
The following are 30 code examples of numpy.nan. Note that numpy.nan is a constant (an IEEE 754 floating-point NaN value), not a callable function.
You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
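Because NaN never compares equal to anything, including itself, code that handles numpy.nan relies on np.isnan and the nan-aware reductions rather than == checks, a pattern that recurs throughout the examples below. A minimal, self-contained sketch:

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print(np.nan == np.nan)   # False -- NaN is unequal even to itself
print(np.isnan(a))        # [False  True False]
print(np.mean(a))         # nan -- NaN propagates through ordinary reductions
print(np.nanmean(a))      # 2.0 -- nan-aware reductions skip missing values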

Example #1
Source File: losses_test.py From DOTA_models with Apache License 2.0
def testReturnsCorrectNanLoss(self):
    batch_size = 3
    num_anchors = 10
    code_size = 4
    prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
    target_tensor = tf.concat([
        tf.zeros([batch_size, num_anchors, code_size // 2]),  # // so the shape stays integral under Python 3
        tf.ones([batch_size, num_anchors, code_size // 2]) * np.nan
    ], axis=2)
    weights = tf.ones([batch_size, num_anchors])
    loss_op = losses.WeightedL2LocalizationLoss()
    loss = loss_op(prediction_tensor, target_tensor, weights=weights,
                   ignore_nan_targets=True)
    expected_loss = (3 * 5 * 4) / 2.0
    with self.test_session() as sess:
        loss_output = sess.run(loss)
        self.assertAllClose(loss_output, expected_loss)
Example #2
Source File: test_scipy_hungarian.py From QCElemental with BSD 3-Clause "New" or "Revised" License
def test_linear_sum_assignment_input_validation():
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])

    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.asarray(C)))
    # assert_array_equal(linear_sum_assignment(C),
    #                    linear_sum_assignment(matrix(C)))

    I = np.identity(3)
    # note: np.bool is deprecated in NumPy >= 1.20; plain bool works there
    assert_array_equal(linear_sum_assignment(I.astype(np.bool)),
                       linear_sum_assignment(I))
    assert_raises(ValueError, linear_sum_assignment, I.astype(str))

    I[0][0] = np.nan
    assert_raises(ValueError, linear_sum_assignment, I)

    I = np.identity(3)
    I[1][1] = np.inf
    assert_raises(ValueError, linear_sum_assignment, I)
Example #3
Source File: OutlierDetection.py From sparse-subspace-clustering-python with MIT License
def OutlierDetection(CMat, s):
    n = np.amax(s)
    _, N = CMat.shape
    OutlierIndx = list()
    FailCnt = 0
    Fail = False
    # columns whose coefficients contain any nan are flagged as outliers
    for i in range(0, N):
        c = CMat[:, i]
        if np.sum(np.isnan(c)) >= 1:
            OutlierIndx.append(i)
            FailCnt += 1
    sc = s.astype(float)
    sc[OutlierIndx] = np.nan
    CMatC = CMat.astype(float)
    CMatC[OutlierIndx, :] = np.nan
    CMatC[:, OutlierIndx] = np.nan
    if FailCnt > (N - n):
        CMatC = np.nan
        sc = np.nan
        Fail = True
    return CMatC, sc, OutlierIndx, Fail
Example #4
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
    and/or vmax are passed, they are used to scale z.

    Note that this function can automatically rescale data into log-space if
    the colormap is a neuropythy log-space colormap such as log_eccentricity.
    To enable this behaviour use the optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u)
Example #5
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0
def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training results files 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP',
         'val', 'val', 'val', 'Recall', 'F1']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # don't show zero loss values
                ax[i].plot(x, y, marker='.', label=s[j])
            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.tight_layout()
        fig.savefig(f.replace('.txt', '.png'), dpi=200)
Example #6
Source File: stat_calc_functions.py From nba_scraper with GNU General Public License v3.0
def parse_shot_types(row):
    """
    function to parse what type of shot is being taken

    Inputs:
    row - pandas row of play by play dataframe

    Outputs:
    shot_type - returns a shot type of the values hook, jump, layup, dunk, tip
    """
    try:
        if row["eventmsgtype"] in [1, 2, 3]:
            return SHOT_DICT[row["eventmsgtype"]][row["eventmsgactiontype"]]
        else:
            return np.nan
    except KeyError:
        return np.nan
Example #7
Source File: stat_calc_functions.py From nba_scraper with GNU General Public License v3.0
def wnba_shot_types(row):
    """
    function to parse what type of shot is being taken

    Inputs:
    row - pandas row of play by play dataframe

    Outputs:
    shot_type - returns a shot type of the values hook, jump, layup, dunk, tip
    """
    try:
        if row["etype"] in [1, 2, 3]:
            return SHOT_DICT[row["etype"]][row["mtype"]]
        else:
            return np.nan
    except KeyError:
        return np.nan
Example #8
Source File: stat_calc_functions.py From nba_scraper with GNU General Public License v3.0
def parse_foul(row):
    """
    function to determine what type of foul is being committed by the player

    Input:
    row - row of nba play by play

    Output:
    foul_type - the foul type of the foul committed by the player
    """
    try:
        if row["eventmsgtype"] == 6:
            try:
                return foul_dict[row["eventmsgactiontype"]]
            except KeyError:
                return np.nan
        return np.nan
    except KeyError:
        return np.nan
Example #9
Source File: stat_calc_functions.py From nba_scraper with GNU General Public License v3.0
def wnba_parse_foul(row):
    """
    function to determine what type of foul is being committed by the player

    Input:
    row - row of nba play by play

    Output:
    foul_type - the foul type of the foul committed by the player
    """
    try:
        if row["etype"] == 6:
            try:
                return foul_dict[row["mtype"]]
            except KeyError:
                return np.nan
        return np.nan
    except KeyError:
        return np.nan
Example #10
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0
def plot_results(start=0, stop=0):  # from utils.utils import *; plot_results()
    # Plot training results files 'results*.txt'
    fig, ax = plt.subplots(2, 5, figsize=(14, 7))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP', 'F1']
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        for i in range(10):
            y = results[i, x]
            if i in [0, 1, 2, 5, 6, 7]:
                y[y == 0] = np.nan  # don't show zero loss values
            ax[i].plot(x, y, marker='.', label=f.replace('.txt', ''))
            ax[i].set_title(s[i])
            if i in [5, 6, 7]:  # share train and val loss y axes
                ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)
Example #11
Source File: test_nistats.py From NiBetaSeries with MIT License
def test_select_confounds_error(confounds_file, tmp_path):
    import pandas as pd
    import numpy as np

    confounds_df = pd.read_csv(str(confounds_file), sep='\t', na_values='n/a')
    confounds_df['white_matter'][0] = np.nan
    conf_file = tmp_path / "confounds.tsv"
    confounds_df.to_csv(str(conf_file), index=False, sep='\t', na_rep='n/a')
    with pytest.raises(ValueError) as val_err:
        _select_confounds(str(conf_file), ['white_matter', 'csf'])
    assert "The selected confounds contain nans" in str(val_err.value)
Example #12
Source File: object_detection_evaluation.py From DOTA_models with Apache License 2.0
def __init__(self,
             num_groundtruth_classes,
             matching_iou_threshold=0.5,
             nms_iou_threshold=1.0,
             nms_max_output_boxes=10000):
    self.per_image_eval = per_image_evaluation.PerImageEvaluation(
        num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
        nms_max_output_boxes)
    self.num_class = num_groundtruth_classes

    self.groundtruth_boxes = {}
    self.groundtruth_class_labels = {}
    self.groundtruth_is_difficult_list = {}
    self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int)
    self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)

    self.detection_keys = set()
    self.scores_per_class = [[] for _ in range(self.num_class)]
    self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
    self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
    self.average_precision_per_class = np.empty(self.num_class, dtype=float)
    self.average_precision_per_class.fill(np.nan)
    self.precisions_per_class = []
    self.recalls_per_class = []
    self.corloc_per_class = np.ones(self.num_class, dtype=float)
Example #13
Source File: metrics.py From object_detector_app with MIT License
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
    """Compute CorLoc according to the definition in the following paper.

    https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

    Returns nans if there are no ground truth images for a class.

    Args:
      num_gt_imgs_per_class: 1D array, representing number of images
          containing at least one object instance of a particular class
      num_images_correctly_detected_per_class: 1D array, representing number
          of images in which at least one object instance of a particular
          class was correctly detected

    Returns:
      corloc_per_class: A float numpy array representing the corloc score of
          each class
    """
    return np.where(
        num_gt_imgs_per_class == 0, np.nan,
        num_images_correctly_detected_per_class / num_gt_imgs_per_class)
Example #14
Source File: sfd.py From dustmaps with GNU General Public License v2.0
def query(self, coords, order=1):
    """
    Returns the map value at the specified location(s) on the sky.

    Args:
        coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
        order (Optional[int]): Interpolation order to use. Defaults to `1`,
            for linear interpolation.

    Returns:
        A float array containing the map value at every input coordinate.
        The shape of the output will be the same as the shape of the
        coordinates stored by `coords`.
    """
    out = np.full(len(coords.l.deg), np.nan, dtype='f4')

    for pole in self.poles:
        m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)
        if np.any(m):
            data, w = self._data[pole]
            x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)
            out[m] = map_coordinates(data, [y, x], order=order, mode='nearest')

    return out
Example #15
Source File: object_detection_evaluation.py From object_detector_app with MIT License
def __init__(self,
             num_groundtruth_classes,
             matching_iou_threshold=0.5,
             nms_iou_threshold=1.0,
             nms_max_output_boxes=10000):
    self.per_image_eval = per_image_evaluation.PerImageEvaluation(
        num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
        nms_max_output_boxes)
    self.num_class = num_groundtruth_classes

    self.groundtruth_boxes = {}
    self.groundtruth_class_labels = {}
    self.groundtruth_is_difficult_list = {}
    self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int)
    self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)

    self.detection_keys = set()
    self.scores_per_class = [[] for _ in range(self.num_class)]
    self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
    self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
    self.average_precision_per_class = np.empty(self.num_class, dtype=float)
    self.average_precision_per_class.fill(np.nan)
    self.precisions_per_class = []
    self.recalls_per_class = []
    self.corloc_per_class = np.ones(self.num_class, dtype=float)
Example #16
Source File: technical_indicators.py From pandas-technical-indicators with MIT License
def trix(df, n):
    """Calculate TRIX for given data.

    :param df: pandas.DataFrame
    :param n: period for the exponential moving averages
    :return: pandas.DataFrame
    """
    EX1 = df['Close'].ewm(span=n, min_periods=n).mean()
    EX2 = EX1.ewm(span=n, min_periods=n).mean()
    EX3 = EX2.ewm(span=n, min_periods=n).mean()
    i = 0
    ROC_l = [np.nan]  # the first rate of change is not computable
    while i + 1 <= df.index[-1]:
        ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
        ROC_l.append(ROC)
        i = i + 1
    Trix = pd.Series(ROC_l, name='Trix_' + str(n))
    df = df.join(Trix)
    return df
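A hypothetical call (names and values invented for illustration), assuming a DataFrame with a default integer index and a 'Close' column; note the while loop above relies on that integer index:

df = pd.DataFrame({'Close': np.linspace(100.0, 110.0, 30)})
df = trix(df, n=5)
# df['Trix_5'] begins with nan entries until the triple EMA has n periods to warm up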
Example #17
Source File: metrics.py From DOTA_models with Apache License 2.0
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
    """Compute CorLoc according to the definition in the following paper.

    https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

    Returns nans if there are no ground truth images for a class.

    Args:
      num_gt_imgs_per_class: 1D array, representing number of images
          containing at least one object instance of a particular class
      num_images_correctly_detected_per_class: 1D array, representing number
          of images in which at least one object instance of a particular
          class was correctly detected

    Returns:
      corloc_per_class: A float numpy array representing the corloc score of
          each class
    """
    return np.where(
        num_gt_imgs_per_class == 0, np.nan,
        num_images_correctly_detected_per_class / num_gt_imgs_per_class)
Example #18
Source File: logger.py From cs294-112_hws with MIT License
def record_tabular(self, key, val):
    assert (str(key) not in self._curr_recorded)
    self._curr_recorded.append(str(key))

    if key in self._tabular:
        self._tabular[key].append(val)
    else:
        # back-fill earlier dump calls with nan so every column stays the same length
        self._tabular[key] = [np.nan] * self._num_dump_tabular_calls + [val]
Example #19
Source File: plot.py From HardRLWithYoutube with MIT License
def pad(xs, value=np.nan):
    maxlen = np.max([len(x) for x in xs])

    padded_xs = []
    for x in xs:
        if x.shape[0] >= maxlen:
            padded_xs.append(x)
            continue  # assumed from context: skip to avoid appending a full-length row twice

        padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
        x_padded = np.concatenate([x, padding], axis=0)
        assert x_padded.shape[1:] == x.shape[1:]
        assert x_padded.shape[0] == maxlen
        padded_xs.append(x_padded)
    return np.array(padded_xs)
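A hypothetical usage of pad (array values invented for illustration): rows shorter than the longest input are extended with the fill value, np.nan by default, so ragged curves can be stacked into one rectangular array:

curves = [np.arange(3, dtype=float), np.arange(5, dtype=float)]
padded = pad(curves)
# padded.shape == (2, 5); padded[0, 3:] holds [nan, nan]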
Example #20
Source File: metrics_test.py From object_detector_app with MIT License
def test_compute_cor_loc_nans(self):
    num_gt_imgs_per_class = np.array([100, 0, 0, 1, 1], dtype=int)
    num_images_correctly_detected_per_class = np.array([10, 0, 1, 0, 0],
                                                       dtype=int)
    corloc = metrics.compute_cor_loc(num_gt_imgs_per_class,
                                     num_images_correctly_detected_per_class)
    expected_corloc = np.array([0.1, np.nan, np.nan, 0, 0], dtype=float)
    self.assertAllClose(corloc, expected_corloc)
Example #21
Source File: prepare.py From DeepLung with GNU General Public License v3.0
def binarize_per_slice(image, spacing, intensity_th=-600, sigma=1, area_th=30,
                       eccen_th=0.99, bg_patch_size=10):
    bw = np.zeros(image.shape, dtype=bool)

    # prepare a mask, with all corner values set to nan
    image_size = image.shape[1]
    grid_axis = np.linspace(-image_size/2 + 0.5, image_size/2 - 0.5, image_size)
    x, y = np.meshgrid(grid_axis, grid_axis)
    d = (x**2 + y**2)**0.5
    nan_mask = (d < image_size/2).astype(float)
    nan_mask[nan_mask == 0] = np.nan
    for i in range(image.shape[0]):
        # check if corner pixels are identical; if so, apply the nan mask to
        # the slice before Gaussian filtering
        if len(np.unique(image[i, 0:bg_patch_size, 0:bg_patch_size])) == 1:
            current_bw = scipy.ndimage.filters.gaussian_filter(
                np.multiply(image[i].astype('float32'), nan_mask),
                sigma, truncate=2.0) < intensity_th
        else:
            current_bw = scipy.ndimage.filters.gaussian_filter(
                image[i].astype('float32'), sigma, truncate=2.0) < intensity_th

        # select proper components
        label = measure.label(current_bw)
        properties = measure.regionprops(label)
        valid_label = set()
        for prop in properties:
            if prop.area * spacing[1] * spacing[2] > area_th and prop.eccentricity < eccen_th:
                valid_label.add(prop.label)
        current_bw = np.in1d(label, list(valid_label)).reshape(label.shape)
        bw[i] = current_bw

    return bw
Example #22
Source File: math_util.py From HardRLWithYoutube with MIT License
def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]

    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary
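For intuition, a quick check with made-up numbers (not from the source project):

y = np.array([1.0, 2.0, 3.0, 4.0])
print(explained_variance(y + 0.1, y))          # 1.0 -- a constant offset leaves no residual variance
print(explained_variance(np.zeros(4), y))      # 0.0 -- no better than predicting zero
print(explained_variance(y, np.full(4, 2.0)))  # nan -- guards the zero-variance case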
Example #23
Source File: a2c.py From lirpg with MIT License
def safemean(xs):
    # return nan for an empty batch rather than letting np.mean warn on empty input
    return np.nan if len(xs) == 0 else np.mean(xs)
Example #24
Source File: test_recorders.py From pywr with GNU General Public License v3.0
def test_statistic_recorder(self, cyclical_storage_model, recorder_agg_func):
    """ Test EventStatisticRecorder """
    m = cyclical_storage_model

    strg = m.nodes['Storage']
    inpt = m.nodes['Input']
    arry = NumpyArrayNodeRecorder(m, inpt)

    # Create the trigger using a threshold parameter
    trigger = StorageThresholdRecorder(m, strg, 4.0, predicate='<=')
    evt_rec = EventRecorder(m, trigger, tracked_parameter=inpt.max_flow)
    evt_stat = EventStatisticRecorder(m, evt_rec, agg_func='max',
                                      event_agg_func='min',
                                      recorder_agg_func=recorder_agg_func)

    m.run()

    # Ensure there is at least one event
    assert evt_rec.events

    evt_values = {si.global_id: [] for si in m.scenarios.combinations}
    for evt in evt_rec.events:
        evt_values[evt.scenario_index.global_id].append(
            np.min(arry.data[evt.start.index:evt.end.index,
                             evt.scenario_index.global_id]))

    func = TestEventRecorder.funcs[recorder_agg_func]

    agg_evt_values = []
    for k, v in sorted(evt_values.items()):
        if len(v) > 0:
            agg_evt_values.append(func(v))
        else:
            agg_evt_values.append(np.nan)

    # Test that the recorder aggregates the per-event values as expected
    assert_allclose(evt_stat.values(), agg_evt_values)
    assert_allclose(evt_stat.aggregated_value(), np.max(agg_evt_values))
Example #25
Source File: util.py From End-to-end-ASR-Pytorch with MIT License
def cal_er(tokenizer, pred, truth, mode='wer', ctc=False):
    # Calculate error rate of a batch
    if pred is None:
        return np.nan
    elif len(pred.shape) >= 3:
        pred = pred.argmax(dim=-1)
    er = []
    for p, t in zip(pred, truth):
        p = tokenizer.decode(p.tolist(), ignore_repeat=ctc)
        t = tokenizer.decode(t.tolist())
        if mode == 'wer':
            p = p.split(' ')
            t = t.split(' ')
        er.append(float(ed.eval(p, t)) / len(t))
    return sum(er) / len(er)
Example #26
Source File: logger.py From cs294-112_hws with MIT License
def dump_tabular(self, print_func=None):
    if len(self._curr_recorded) == 0:
        return ''

    ### reset
    self._curr_recorded = list()
    self._num_dump_tabular_calls += 1

    ### make sure all same length
    for k, v in self._tabular.items():
        if len(v) == self._num_dump_tabular_calls:
            pass
        elif len(v) == self._num_dump_tabular_calls - 1:
            self._tabular[k].append(np.nan)
        else:
            raise ValueError('key {0} should not have {1} items when {2} calls have been made'.format(
                k, len(v), self._num_dump_tabular_calls))

    ### print
    if print_func is not None:
        log_str = tabulate(sorted([(k, v[-1]) for k, v in self._tabular.items()],
                                  key=lambda kv: kv[0]))
        for line in log_str.split('\n'):
            print_func(line)

    ### write to file
    tabular_pandas = pandas.DataFrame({k: pandas.Series(v) for k, v in self._tabular.items()})
    tabular_pandas.to_csv(self._csv_path)
Example #27
Source File: test_region.py From aospy with Apache License 2.0
def values_for_reg_arr():
    return np.array([[-2., 1.],
                     [np.nan, 5.],
                     [3., 3.],
                     [4., 4.2]])
Example #28
Source File: function_helper.py From TradzQAI with Apache License 2.0
def fill_for_noncomputable_vals(input_data, result_data):
    non_computable_values = np.repeat(
        np.nan, len(input_data) - len(result_data)
    )
    filled_result_data = np.append(non_computable_values, result_data)
    return filled_result_data
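A hedged usage sketch (the moving average below is invented for illustration): when an indicator needs a warm-up window, its result is shorter than its input, and fill_for_noncomputable_vals front-pads the gap with np.nan so the result aligns with the original series:

prices = np.arange(10.0)
sma4 = np.convolve(prices, np.ones(4) / 4, mode='valid')  # 7 values from 10 inputs
aligned = fill_for_noncomputable_vals(prices, sma4)
# aligned[:3] is nan; len(aligned) == len(prices) == 10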
Example #29
Source File: preprocessor_test.py From DOTA_models with Apache License 2.0
def expectedLabelScoresAfterThresholdingWithMissingScore(self):
    return tf.constant([np.nan], dtype=tf.float32)
Example #30
Source File: preprocessor_test.py From DOTA_models with Apache License 2.0
def testStrictRandomCropImageWithKeypoints(self):
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
        mock_sample_distorted_bounding_box.return_value = (
            tf.constant([6, 143, 0], dtype=tf.int32),
            tf.constant([190, 237, -1], dtype=tf.int32),
            tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
        (new_image, new_boxes, new_labels,
         new_keypoints) = preprocessor._strict_random_crop_image(
             image, boxes, labels, keypoints=keypoints)
        with self.test_session() as sess:
            new_image, new_boxes, new_labels, new_keypoints = sess.run([
                new_image, new_boxes, new_labels, new_keypoints])

            expected_boxes = np.array([
                [0.0, 0.0, 0.75789469, 1.0],
                [0.23157893, 0.24050637, 0.75789469, 1.0],
            ], dtype=np.float32)
            expected_keypoints = np.array([
                [[np.nan, np.nan],
                 [np.nan, np.nan],
                 [np.nan, np.nan]],
                [[0.38947368, 0.07173],
                 [0.49473682, 0.24050637],
                 [0.60000002, 0.40928277]]
            ], dtype=np.float32)
            self.assertAllEqual(new_image.shape, [190, 237, 3])
            self.assertAllClose(
                new_boxes.flatten(), expected_boxes.flatten())
            self.assertAllClose(
                new_keypoints.flatten(), expected_keypoints.flatten())