Python numpy.nonzero() Examples
The following are 30 code examples showing how to use numpy.nonzero(). They are extracted from open source projects; the originating project, author, file, and license are listed above each example.
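Before the project examples, here is a minimal, self-contained illustration of what numpy.nonzero() returns; the array below is made up purely for demonstration:

import numpy as np

a = np.array([[3, 0, 0],
              [0, 4, 0],
              [5, 6, 0]])

rows, cols = np.nonzero(a)        # one index array per dimension
print(rows)                       # [0 1 2 2]
print(cols)                       # [0 1 0 1]
print(a[rows, cols])              # [3 4 5 6] -- the non-zero values
print(np.nonzero(a > 3))          # also works on boolean conditions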
Example 1
Project: Parsing-R-CNN Author: soeaver File: vis.py License: MIT License | 7 votes |
def vis_mask(img, mask, bbox_color, show_parss=False):
    """Visualizes a single binary mask."""
    img = img.astype(np.float32)
    idx = np.nonzero(mask)

    border_color = cfg.VIS.SHOW_SEGMS.BORDER_COLOR
    border_thick = cfg.VIS.SHOW_SEGMS.BORDER_THICK
    mask_color = bbox_color if cfg.VIS.SHOW_SEGMS.MASK_COLOR_FOLLOW_BOX else _WHITE
    mask_color = np.asarray(mask_color)
    mask_alpha = cfg.VIS.SHOW_SEGMS.MASK_ALPHA

    _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

    if cfg.VIS.SHOW_SEGMS.SHOW_BORDER:
        cv2.drawContours(img, contours, -1, border_color, border_thick, cv2.LINE_AA)

    if cfg.VIS.SHOW_SEGMS.SHOW_MASK and not show_parss:
        img[idx[0], idx[1], :] *= 1.0 - mask_alpha
        img[idx[0], idx[1], :] += mask_alpha * mask_color

    return img.astype(np.uint8)
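The nonzero idiom here is indexing an H x W x 3 image with the row and column arrays returned for a 2D mask, so that only masked pixels are blended. A standalone sketch with a tiny fabricated image (the cfg and cv2 details above are specific to the project):

import numpy as np

img = np.zeros((4, 4, 3), dtype=np.float32)
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1                                    # pretend segmentation mask

idx = np.nonzero(mask)                                # (row_indices, col_indices)
mask_color = np.array([255.0, 255.0, 255.0])
mask_alpha = 0.4

img[idx[0], idx[1], :] *= 1.0 - mask_alpha            # fade only the masked pixels
img[idx[0], idx[1], :] += mask_alpha * mask_color     # then blend in the mask color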
Example 2
Project: Parsing-R-CNN Author: soeaver File: vis.py License: MIT License | 7 votes |
def vis_parsing(img, parsing, colormap, show_segms=True):
    """Visualizes a single binary parsing."""
    img = img.astype(np.float32)
    idx = np.nonzero(parsing)

    parsing_alpha = cfg.VIS.SHOW_PARSS.PARSING_ALPHA
    colormap = colormap_utils.dict2array(colormap)
    parsing_color = colormap[parsing.astype(np.int)]

    border_color = cfg.VIS.SHOW_PARSS.BORDER_COLOR
    border_thick = cfg.VIS.SHOW_PARSS.BORDER_THICK

    img[idx[0], idx[1], :] *= 1.0 - parsing_alpha
    # img[idx[0], idx[1], :] += alpha * parsing_color
    img += parsing_alpha * parsing_color

    if cfg.VIS.SHOW_PARSS.SHOW_BORDER and not show_segms:
        _, contours, _ = cv2.findContours(parsing.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(img, contours, -1, border_color, border_thick, cv2.LINE_AA)

    return img.astype(np.uint8)
Example 3
Project: fenics-topopt Author: zfergus File: gui.py License: MIT License | 6 votes |
def plot_force_arrows(self, f):
    """Add arrows to the plot for each force."""
    arrowprops = {"arrowstyle": "->", "connectionstyle": "arc3",
                  "lw": "2", "color": 0}
    cmap = plt.cm.get_cmap("hsv", f.shape[1] + 1)
    for load_i in range(f.shape[1]):
        nz = np.nonzero(f[:, load_i])
        arrowprops["color"] = cmap(load_i)
        for i in range(nz[0].shape[0]):
            x, y = id_to_xy(nz[0][i] // 2, self.nelx, self.nely)
            x = max(min(x, self.nelx - 1), 0)
            y = max(min(y, self.nely - 1), 0)
            z = int(nz[0][i] % 2)
            mag = -50 * f[nz[0][i], load_i]
            self.ax.annotate("", xy=(x, y), xycoords="data",
                             xytext=(0 if z else mag, mag if z else 0),
                             textcoords="offset points", arrowprops=arrowprops)
Example 4
Project: fenics-topopt Author: zfergus File: gui.py License: MIT License | 6 votes |
def plot_force_arrows(self, f):
    """Add arrows to the plot for each force."""
    arrowprops = {"arrowstyle": "->", "connectionstyle": "arc3",
                  "lw": "2", "color": 0}
    cmap = plt.cm.get_cmap("hsv", f.shape[1] + 1)
    for load_i in range(f.shape[1]):
        nz = np.nonzero(f[:, load_i])
        arrowprops["color"] = cmap(load_i)
        for i in range(nz[0].shape[0]):
            x, y = id_to_xy(nz[0][i] // 2, self.nelx, self.nely)
            x = max(min(x, self.nelx - 1), 0)
            y = max(min(y, self.nely - 1), 0)
            z = int(nz[0][i] % 2)
            mag = -50 * f[nz[0][i], load_i]
            self.ax.annotate("", xy=(x, y), xycoords="data",
                             xytext=(0 if z else mag, mag if z else 0),
                             textcoords="offset points", arrowprops=arrowprops)
Example 5
Project: DOTA_models Author: ringringyi File: np_box_list_ops.py License: Apache License 2.0 | 6 votes |
def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):
    """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

    For each box in boxlist1, we want its IOA to be more than minoverlap with
    at least one of the boxes in boxlist2. If it does not, we remove it.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.
      minoverlap: Minimum required overlap between boxes, to count them as
                  overlapping.

    Returns:
      A pruned boxlist with size [N', 4].
    """
    intersection_over_area = ioa(boxlist2, boxlist1)  # [M, N] tensor
    intersection_over_area = np.amax(intersection_over_area, axis=0)  # [N] tensor
    keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
    keep_inds = np.nonzero(keep_bool)[0]
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1
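The pruning step boils down to turning a boolean keep-mask into integer indices with np.nonzero(...)[0]. A minimal sketch with fabricated overlap scores (ioa and gather above are project helpers, not used here):

import numpy as np

intersection_over_area = np.array([0.0, 0.3, 0.05, 0.9])   # one score per box, made up
minoverlap = 0.1

keep_bool = np.greater_equal(intersection_over_area, minoverlap)
keep_inds = np.nonzero(keep_bool)[0]
print(keep_inds)        # [1 3] -- indices of the boxes that survive pruning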
Example 6
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 6 votes |
def rollout_iterator(self):
    """
    Iterate through all the rollouts in the dataset sequentially
    """
    end_indices = np.nonzero(self._dones)[0] + 1

    states = np.asarray(self._states)
    actions = np.asarray(self._actions)
    next_states = np.asarray(self._next_states)
    rewards = np.asarray(self._rewards)
    dones = np.asarray(self._dones)

    start_idx = 0
    for end_idx in end_indices:
        indices = np.arange(start_idx, end_idx)
        yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices]
        start_idx = end_idx
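Here np.nonzero on the done flags yields the indices where episodes terminate; adding 1 turns them into exclusive end indices. A small sketch with an invented done array:

import numpy as np

dones = np.array([0, 0, 1, 0, 0, 0, 1, 0, 1], dtype=bool)
end_indices = np.nonzero(dones)[0] + 1      # [3 7 9]

start = 0
for end in end_indices:
    print("rollout indices:", np.arange(start, end))
    start = end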
Example 7
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 6 votes |
def random_iterator(self, batch_size):
    """
    Iterate once through all (s, a, r, s') in batches in a random order
    """
    all_indices = np.nonzero(np.logical_not(self._dones))[0]
    np.random.shuffle(all_indices)

    states = np.asarray(self._states)
    actions = np.asarray(self._actions)
    next_states = np.asarray(self._next_states)
    rewards = np.asarray(self._rewards)
    dones = np.asarray(self._dones)

    i = 0
    while i < len(all_indices):
        indices = all_indices[i:i + batch_size]
        yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices]
        i += batch_size

###############
### Logging ###
###############
Example 8
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 6 votes |
def log(self):
    end_idxs = np.nonzero(self._dones)[0] + 1

    returns = []
    start_idx = 0
    for end_idx in end_idxs:
        rewards = self._rewards[start_idx:end_idx]
        returns.append(np.sum(rewards))
        start_idx = end_idx

    logger.record_tabular('ReturnAvg', np.mean(returns))
    logger.record_tabular('ReturnStd', np.std(returns))
    logger.record_tabular('ReturnMin', np.min(returns))
    logger.record_tabular('ReturnMax', np.max(returns))

##################
### Tensorflow ###
##################
Example 9
Project: QCElemental Author: MolSSI File: scipy_hungarian.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def _step1(state):
    """Steps 1 and 2 in the Wikipedia page."""

    # Step 1: For each row of the matrix, find the smallest element and
    # subtract it from every element in its row.
    state.C -= state.C.min(axis=1)[:, np.newaxis]
    # Step 2: Find a zero (Z) in the resulting matrix. If there is no
    # starred zero in its row or column, star Z. Repeat for each element
    # in the matrix.
    for i, j in zip(*np.nonzero(state.C == 0)):
        if state.col_uncovered[j] and state.row_uncovered[i]:
            state.marked[i, j] = 1
            state.col_uncovered[j] = False
            state.row_uncovered[i] = False

    state._clear_covers()
    return _step3
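The zip(*np.nonzero(...)) pattern walks over the (row, column) coordinates of every zero in the cost matrix. Roughly, with a made-up matrix:

import numpy as np

C = np.array([[0, 2, 0],
              [4, 0, 6]])

for i, j in zip(*np.nonzero(C == 0)):
    print(i, j)        # prints (0, 0), (0, 2), (1, 1)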
Example 10
Project: object_detector_app Author: datitran File: np_box_list_ops.py License: MIT License | 6 votes |
def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):
    """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

    For each box in boxlist1, we want its IOA to be more than minoverlap with
    at least one of the boxes in boxlist2. If it does not, we remove it.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.
      minoverlap: Minimum required overlap between boxes, to count them as
                  overlapping.

    Returns:
      A pruned boxlist with size [N', 4].
    """
    intersection_over_area = ioa(boxlist2, boxlist1)  # [M, N] tensor
    intersection_over_area = np.amax(intersection_over_area, axis=0)  # [N] tensor
    keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
    keep_inds = np.nonzero(keep_bool)[0]
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1
Example 11
Project: ICDAR-2019-SROIE Author: zzzDavid File: my_utils.py License: MIT License | 6 votes |
def pred_to_dict(text, pred, prob):
    res = {"company": ("", 0), "date": ("", 0), "address": ("", 0), "total": ("", 0)}
    keys = list(res.keys())

    seps = [0] + (numpy.nonzero(numpy.diff(pred))[0] + 1).tolist() + [len(pred)]
    for i in range(len(seps) - 1):
        pred_class = pred[seps[i]] - 1
        if pred_class == -1:
            continue

        new_key = keys[pred_class]
        new_prob = prob[seps[i] : seps[i + 1]].max()
        if new_prob > res[new_key][1]:
            res[new_key] = (text[seps[i] : seps[i + 1]], new_prob)

    return {k: regex.sub(r"[\t\n]", " ", v[0].strip()) for k, v in res.items()}
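The key trick is numpy.nonzero(numpy.diff(pred))[0] + 1, which finds the positions where the predicted class changes and uses them as segment boundaries. A minimal sketch with an invented label sequence:

import numpy as np

pred = np.array([1, 1, 1, 2, 2, 0, 0, 3])
seps = [0] + (np.nonzero(np.diff(pred))[0] + 1).tolist() + [len(pred)]
print(seps)            # [0, 3, 5, 7, 8]

for i in range(len(seps) - 1):
    print(pred[seps[i]:seps[i + 1]])   # one run of identical labels per slice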
Example 12
Project: NeuroKit Author: neuropsychology File: complexity_optimize.py License: MIT License | 6 votes |
def _complexity_dimension(signal, delay=1, dimension_max=20, method="afnn", R=10.0, A=2.0):

    # Initialize vectors
    if isinstance(dimension_max, int):
        dimension_seq = np.arange(1, dimension_max + 1)
    else:
        dimension_seq = np.array(dimension_max)

    # Method
    method = method.lower()
    if method in ["afnn"]:
        E, Es = _embedding_dimension_afn(signal, dimension_seq=dimension_seq, delay=delay, show=False)
        E1 = E[1:] / E[:-1]
        E2 = Es[1:] / Es[:-1]
        min_dimension = [i for i, x in enumerate(E1 >= 0.85 * np.max(E1)) if x][0] + 1
        optimize_indices = [E1, E2]
        return dimension_seq, optimize_indices, min_dimension

    if method in ["fnn"]:
        f1, f2, f3 = _embedding_dimension_ffn(signal, dimension_seq=dimension_seq, delay=delay, R=R, A=A)
        min_dimension = [i for i, x in enumerate(f3 <= 1.85 * np.min(f3[np.nonzero(f3)])) if x][0]
        optimize_indices = [f1, f2, f3]
        return dimension_seq, optimize_indices, min_dimension
    else:
        raise ValueError("NeuroKit error: complexity_dimension(): 'method' not recognized.")
Example 13
Project: NeuroKit Author: neuropsychology File: fractal_correlation.py License: MIT License | 6 votes |
def _fractal_correlation(signal, r_vals, dist):
    """References
    -----------
    - `nolds <https://github.com/CSchoel/nolds/blob/master/nolds/measures.py>`_
    """
    n = len(signal)

    corr = np.zeros(len(r_vals))
    for i, r in enumerate(r_vals):
        corr[i] = 1 / (n * (n - 1)) * np.sum(dist < r)

    # filter zeros from csums
    nonzero = np.nonzero(corr)[0]
    r_vals = r_vals[nonzero]
    corr = corr[nonzero]

    return r_vals, corr
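The filtering step is a common use of nonzero: keep only the entries of parallel arrays where one of them is non-zero. A sketch with small fabricated arrays:

import numpy as np

r_vals = np.array([0.1, 0.2, 0.4, 0.8])
corr = np.array([0.0, 0.0, 0.05, 0.3])

nonzero = np.nonzero(corr)[0]                  # indices where corr is non-zero
r_vals, corr = r_vals[nonzero], corr[nonzero]
print(r_vals, corr)                            # [0.4 0.8] [0.05 0.3]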
Example 14
Project: radiometric_normalization Author: planetlabs File: utils.py License: Apache License 2.0 | 6 votes |
def pixel_list_to_array(pixel_locations, shape):
    ''' Transforms a list of pixel locations into a 2D array.

    :param tuple pixel_locations: A tuple of two lists representing the x and y
        coordinates of the locations of a set of pixels (i.e. the output of
        numpy.nonzero(valid_pixels) where valid_pixels is a 2D boolean array
        representing the pixel locations)
    :param list active_pixels: A list the same length as the x and y coordinate
        lists within pixel_locations representing whether a pixel location
        should be represented in the mask or not
    :param tuple shape: The shape of the output array consisting of a tuple of
        (height, width)

    :returns: A 2-D boolean array representing active pixels
    '''
    mask = numpy.zeros(shape, dtype=numpy.bool)
    mask[pixel_locations] = True
    return mask
Example 15
Project: radiometric_normalization Author: planetlabs File: utils.py License: Apache License 2.0 | 6 votes |
def trim_pixel_list(pixel_locations, active_pixels):
    ''' Trims the list of pixel locations to only the active pixels.

    :param tuple pixel_locations: A tuple of two lists representing the x and y
        coordinates of the locations of a set of pixels (i.e. the output of
        numpy.nonzero(valid_pixels) where valid_pixels is a 2D boolean array
        representing the pixel locations)
    :param list active_pixels: A list the same length as the x and y coordinate
        lists within pixel_locations representing whether a pixel location
        should be represented in the mask or not

    :returns: A tuple of two lists representing the x and y coordinates of the
        locations of active pixels
    '''
    active_pixels = numpy.nonzero(active_pixels)[0]
    return (pixel_locations[0][active_pixels],
            pixel_locations[1][active_pixels])
Example 16
Project: radiometric_normalization Author: planetlabs File: time_stack.py License: Apache License 2.0 | 6 votes |
def _uniform_weight_alpha(sum_masked_arrays, output_datatype):
    '''Calculates the cumulative mask of a list of masked arrays.

    Input:
        sum_masked_arrays (list of numpy masked arrays): The list of masked
            arrays to find the cumulative mask of, each element represents
            one band. (sum_masked_array.mask has a 1 for a no data pixel and
            a 0 otherwise)
        output_datatype (numpy datatype): The output datatype

    Output:
        output_alpha (numpy uint16 array): The output mask (0 for a no data
            pixel, uint16 max value otherwise)
    '''
    output_alpha = numpy.ones(sum_masked_arrays[0].shape)
    for band_sum_masked_array in sum_masked_arrays:
        output_alpha[numpy.nonzero(band_sum_masked_array.mask == 1)] = 0
    output_alpha = output_alpha.astype(output_datatype) * \
        numpy.iinfo(output_datatype).max

    return output_alpha
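Indexing with numpy.nonzero(mask == 1) selects exactly the no-data positions so they can be zeroed in one assignment. A small sketch using an ordinary integer mask instead of a masked array:

import numpy as np

alpha = np.ones((3, 3))
no_data = np.array([[0, 1, 0],
                    [0, 0, 0],
                    [1, 0, 0]])

alpha[np.nonzero(no_data == 1)] = 0   # zero out only the flagged positions
print(alpha)
# [[1. 0. 1.]
#  [1. 1. 1.]
#  [0. 1. 1.]]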
Example 17
Project: radiometric_normalization Author: planetlabs File: display_wrapper.py License: Apache License 2.0 | 6 votes |
def create_pixel_plots(candidate_path, reference_path, base_name,
                       last_band_alpha=False, limits=None, custom_alpha=None):
    c_ds, c_alpha, c_band_count = _open_image_and_get_info(
        candidate_path, last_band_alpha)
    r_ds, r_alpha, r_band_count = _open_image_and_get_info(
        reference_path, last_band_alpha)

    _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count)

    if custom_alpha != None:
        combined_alpha = custom_alpha
    else:
        combined_alpha = numpy.logical_and(c_alpha, r_alpha)
    valid_pixels = numpy.nonzero(combined_alpha)

    for band_no in range(1, c_band_count + 1):
        c_band = gimage.read_single_band(c_ds, band_no)
        r_band = gimage.read_single_band(r_ds, band_no)
        file_name = '{}_{}.png'.format(base_name, band_no)
        display.plot_pixels(file_name, c_band[valid_pixels],
                            r_band[valid_pixels], limits)
Example 18
Project: radiometric_normalization Author: planetlabs File: display_wrapper.py License: Apache License 2.0 | 6 votes |
def create_all_bands_histograms(candidate_path, reference_path, base_name,
                                last_band_alpha=False,
                                color_order=['b', 'g', 'r', 'y'],
                                x_limits=None, y_limits=None):
    c_gimg = gimage.load(candidate_path, last_band_alpha=last_band_alpha)
    r_gimg = gimage.load(reference_path, last_band_alpha=last_band_alpha)

    gimage.check_comparable([c_gimg, r_gimg])

    combined_alpha = numpy.logical_and(c_gimg.alpha, r_gimg.alpha)
    valid_pixels = numpy.nonzero(combined_alpha)

    file_name = '{}_histograms.png'.format(base_name)
    display.plot_histograms(
        file_name,
        [c_band[valid_pixels] for c_band in c_gimg.bands],
        [r_band[valid_pixels] for r_band in r_gimg.bands],
        color_order, x_limits, y_limits)
Example 19
Project: radiometric_normalization Author: planetlabs File: transformation.py License: Apache License 2.0 | 6 votes |
def generate_ols_regression(candidate_band, reference_band, pif_mask):
    ''' Performs PCA analysis on the valid pixels and filters according to
    the distance from the principal eigenvector.

    :param array candidate_band: A 2D array representing the image data of the
        candidate band
    :param array reference_band: A 2D array representing the image data of the
        reference image
    :param array pif_mask: A 2D array representing the PIF pixels in the images

    :returns: A LinearTransformation object (gain and offset)
    '''
    candidate_pifs = candidate_band[numpy.nonzero(pif_mask)]
    reference_pifs = reference_band[numpy.nonzero(pif_mask)]

    return generate_ols_regression_pixel_list(candidate_pifs, reference_pifs)
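Indexing a band with numpy.nonzero(pif_mask) pulls out the pixel values at the masked locations as a flat 1D array. A tiny sketch with fabricated bands:

import numpy as np

band = np.arange(16).reshape(4, 4)
pif_mask = np.zeros((4, 4), dtype=int)
pif_mask[1, 2] = 1
pif_mask[3, 0] = 1

pixels = band[np.nonzero(pif_mask)]
print(pixels)      # [ 6 12] -- values at the masked locations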
Example 20
Project: pyGSTi Author: pyGSTio File: dataset.py License: Apache License 2.0 | 6 votes |
def _get_counts(self, timestamp=None, all_outcomes=False):
    """
    Returns this row's sequence of "repetition counts", that is, the number of
    repetitions of each outcome label in the `outcomes` list, or equivalently,
    each outcome label index in this row's `.oli` member.
    """
    # Note: when all_outcomes == False we don't add outcome labels that
    # aren't present for any of this row's elements (i.e. the summed count
    # is zero)
    cntDict = _ld.OutcomeLabelDict()
    if timestamp is not None:
        tslc = _np.where(_np.isclose(self.time, timestamp))[0]
    else:
        tslc = slice(None)

    if self.reps is None:
        for ol, i in self.dataset.olIndex.items():
            cnt = float(_np.count_nonzero(_np.equal(self.oli[tslc], i)))
            if all_outcomes or cnt > 0:
                cntDict[ol] = cnt
    else:
        for ol, i in self.dataset.olIndex.items():
            inds = _np.nonzero(_np.equal(self.oli[tslc], i))[0]
            if all_outcomes or len(inds) > 0:
                cntDict[ol] = float(sum(self.reps[tslc][inds]))

    return cntDict
Example 21
Project: DIB-R Author: nv-tlabs File: utils_mesh.py License: MIT License | 6 votes |
def meshresample(pointnp_px3, facenp_fx3, edgenp_ex2):
    p1 = pointnp_px3[edgenp_ex2[:, 0], :]
    p2 = pointnp_px3[edgenp_ex2[:, 1], :]
    pmid = (p1 + p2) / 2
    point2np_px3 = np.concatenate((pointnp_px3, pmid), axis=0)

    # delete f
    # add 4 new faces
    face2np_fx3 = []
    pnum = np.max(facenp_fx3) + 1
    for f in facenp_fx3:
        p1, p2, p3 = f
        p12 = (edgenp_ex2 == (min(p1, p2), max(p1, p2))).all(axis=1).nonzero()[0] + pnum
        p23 = (edgenp_ex2 == (min(p2, p3), max(p2, p3))).all(axis=1).nonzero()[0] + pnum
        p31 = (edgenp_ex2 == (min(p3, p1), max(p3, p1))).all(axis=1).nonzero()[0] + pnum
        face2np_fx3.append([p1, p12, p31])
        face2np_fx3.append([p12, p2, p23])
        face2np_fx3.append([p31, p23, p3])
        face2np_fx3.append([p12, p23, p31])
    face2np_fx3 = np.array(face2np_fx3, dtype=np.int64)

    return point2np_px3, face2np_fx3
Example 22
Project: poeai Author: nicholastoddsmith File: TargetingSystem.py License: MIT License | 6 votes |
def GetItemPixels(self, I):
    '''
    Locates items that should be picked up on the screen
    '''
    ws = [8, 14]
    D1 = np.abs(I - np.array([10.8721, 12.8995, 13.9932])).sum(axis=2) < 15
    D2 = np.abs(I - np.array([118.1302, 116.0938, 106.9063])).sum(axis=2) < 76
    R1 = view_as_windows(D1, ws, ws).sum(axis=(2, 3))
    R2 = view_as_windows(D2, ws, ws).sum(axis=(2, 3))
    FR = ((R1 + R2 / np.prod(ws)) >= 1.0) & (R1 > 10) & (R2 > 10)
    PL = np.transpose(np.nonzero(FR)) * np.array(ws)
    if len(PL) <= 0:
        return []
    bc = Birch(threshold=50, n_clusters=None)
    bc.fit(PL)
    return bc.subcluster_centers_
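np.transpose(np.nonzero(FR)) converts the per-dimension index tuple into an N x 2 array of (row, col) coordinates, which is a convenient input for a clustering algorithm. A sketch with a made-up boolean grid:

import numpy as np

FR = np.array([[False, True, False],
               [True, False, True]])

PL = np.transpose(np.nonzero(FR))   # one (row, col) pair per True cell
print(PL)
# [[0 1]
#  [1 0]
#  [1 2]]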
Example 23
Project: vehicle_counting_tensorflow Author: ahmetozlu File: argmax_matcher_test.py License: MIT License | 6 votes |
def test_return_correct_matches_with_default_thresholds(self):

    def graph_fn(similarity_matrix):
        matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
        match = matcher.match(similarity_matrix)
        matched_cols = match.matched_column_indicator()
        unmatched_cols = match.unmatched_column_indicator()
        match_results = match.match_results
        return (matched_cols, unmatched_cols, match_results)

    similarity = np.array([[1., 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]], dtype=np.float32)
    expected_matched_rows = np.array([2, 0, 1, 0, 1])
    (res_matched_cols, res_unmatched_cols,
     res_match_results) = self.execute(graph_fn, [similarity])

    self.assertAllEqual(res_match_results[res_matched_cols], expected_matched_rows)
    self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4])
    self.assertFalse(np.all(res_unmatched_cols))
Example 24
Project: vehicle_counting_tensorflow Author: ahmetozlu File: argmax_matcher_test.py License: MIT License | 6 votes |
def test_return_correct_matches_with_matched_threshold(self):

    def graph_fn(similarity):
        matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.)
        match = matcher.match(similarity)
        matched_cols = match.matched_column_indicator()
        unmatched_cols = match.unmatched_column_indicator()
        match_results = match.match_results
        return (matched_cols, unmatched_cols, match_results)

    similarity = np.array([[1, 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]], dtype=np.float32)
    expected_matched_cols = np.array([0, 3, 4])
    expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([1, 2])

    (res_matched_cols, res_unmatched_cols,
     match_results) = self.execute(graph_fn, [similarity])
    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], expected_unmatched_cols)
Example 25
Project: vehicle_counting_tensorflow Author: ahmetozlu File: argmax_matcher_test.py License: MIT License | 6 votes |
def test_return_correct_matches_with_matched_and_unmatched_threshold(self):

    def graph_fn(similarity):
        matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
                                               unmatched_threshold=2.)
        match = matcher.match(similarity)
        matched_cols = match.matched_column_indicator()
        unmatched_cols = match.unmatched_column_indicator()
        match_results = match.match_results
        return (matched_cols, unmatched_cols, match_results)

    similarity = np.array([[1, 1, 1, 3, 1],
                           [2, -1, 2, 0, 4],
                           [3, 0, -1, 0, 0]], dtype=np.float32)
    expected_matched_cols = np.array([0, 3, 4])
    expected_matched_rows = np.array([2, 0, 1])
    expected_unmatched_cols = np.array([1])  # col 2 has too high maximum val

    (res_matched_cols, res_unmatched_cols,
     match_results) = self.execute(graph_fn, [similarity])
    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], expected_unmatched_cols)
Example 26
Project: vehicle_counting_tensorflow Author: ahmetozlu File: argmax_matcher_test.py License: MIT License | 6 votes |
def test_return_correct_matches_unmatched_row_while_using_force_match(self):

    def graph_fn(similarity):
        matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
                                               unmatched_threshold=2.,
                                               force_match_for_each_row=True)
        match = matcher.match(similarity)
        matched_cols = match.matched_column_indicator()
        unmatched_cols = match.unmatched_column_indicator()
        match_results = match.match_results
        return (matched_cols, unmatched_cols, match_results)

    similarity = np.array([[1, 1, 1, 3, 1],
                           [-1, 0, -2, -2, -1],
                           [3, 0, -1, 2, 0]], dtype=np.float32)
    expected_matched_cols = np.array([0, 1, 3])
    expected_matched_rows = np.array([2, 1, 0])
    expected_unmatched_cols = np.array([2, 4])  # col 2 has too high max val

    (res_matched_cols, res_unmatched_cols,
     match_results) = self.execute(graph_fn, [similarity])
    self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
    self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
    self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], expected_unmatched_cols)
Example 27
Project: vehicle_counting_tensorflow Author: ahmetozlu File: np_box_list_ops.py License: MIT License | 6 votes |
def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):
    """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

    For each box in boxlist1, we want its IOA to be more than minoverlap with
    at least one of the boxes in boxlist2. If it does not, we remove it.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.
      minoverlap: Minimum required overlap between boxes, to count them as
                  overlapping.

    Returns:
      A pruned boxlist with size [N', 4].
    """
    intersection_over_area = ioa(boxlist2, boxlist1)  # [M, N] tensor
    intersection_over_area = np.amax(intersection_over_area, axis=0)  # [N] tensor
    keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
    keep_inds = np.nonzero(keep_bool)[0]
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1
Example 28
Project: cat-bbs Author: aleju File: predict_video.py License: MIT License | 5 votes |
def _heatmap_to_rects(self, grid_pred, bb_img):
    """Convert a heatmap to rectangles / bounding box candidates."""
    grid_pred = np.squeeze(grid_pred)  # (1, H, W) => (H, W)

    # remove low activations
    grid_thresh = grid_pred >= self.heatmap_activation_threshold

    # find connected components
    grid_labeled, num_labels = morphology.label(
        grid_thresh, background=0, connectivity=1, return_num=True
    )

    # for each connected component,
    # - draw a bounding box around it,
    # - shrink the bounding box to optimal size
    # - estimate a score/confidence value
    bbs = []
    for label in range(1, num_labels + 1):
        (yy, xx) = np.nonzero(grid_labeled == label)
        min_y, max_y = np.min(yy), np.max(yy)
        min_x, max_x = np.min(xx), np.max(xx)
        rect = RectangleOnImage(x1=min_x, x2=max_x + 1, y1=min_y, y2=max_y + 1,
                                shape=grid_labeled)
        activation = self._rect_to_score(rect, grid_pred)
        rect_shrunk, activation_shrunk = self._shrink(grid_pred, rect)
        rect_rs_shrunk = rect_shrunk.on(bb_img)
        bbs.append((rect_rs_shrunk, activation_shrunk))

    return bbs
Example 29
Project: fullrmc Author: bachiraoun File: Collection.py License: GNU Affero General Public License v3.0 | 5 votes |
def find_extrema(x, max=True, min=True, strict=False, withend=False):
    """
    Get a vector extrema indexes and values.

    :Parameters:
        #. max (boolean): Whether to index the maxima.
        #. min (boolean): Whether to index the minima.
        #. strict (boolean): Whether not to index changes to zero gradient.
        #. withend (boolean): Whether to always include x[0] and x[-1].

    :Returns:
        #. indexes (numpy.ndarray): Extrema indexes.
        #. values (numpy.ndarray): Extrema values.
    """
    # This is the gradient
    dx = np.empty(len(x))
    dx[1:] = np.diff(x)
    dx[0] = dx[1]
    # Clean up the gradient in order to pick out any change of sign
    dx = np.sign(dx)
    # define the threshold for whether to pick out changes to zero gradient
    threshold = 0
    if strict:
        threshold = 1
    # Second order diff to pick out the spikes
    d2x = np.diff(dx)
    if max and min:
        d2x = abs(d2x)
    elif max:
        d2x = -d2x
    # Take care of the two ends
    if withend:
        d2x[0] = 2
        d2x[-1] = 2
    # Sift out the list of extremas
    ind = np.nonzero(d2x > threshold)[0]
    return ind, x[ind]
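The final selection is the generic "indices where a condition holds" idiom on a 1D array; with a fabricated second-difference array:

import numpy as np

d2x = np.array([0, 2, 0, -2, 2, 0])
threshold = 0
ind = np.nonzero(d2x > threshold)[0]
print(ind)      # [1 4] -- positions where the second difference spikes upward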
Example 30
Project: discomll Author: romanorac File: measures.py License: Apache License 2.0 | 5 votes |
def info_gain_numeric(x, y, accuracy):
    x_unique = list(np.unique(x))
    if len(x_unique) == 1:
        return None

    indices = x.argsort()  # sort numeric attribute
    x, y = x[indices], y[indices]  # save sorted features with sorted labels

    right_dist = np.bincount(y)
    dummy_class = np.array([len(right_dist)])
    class_indices = right_dist.nonzero()[0]
    right_dist = right_dist[class_indices]
    left_dist = np.zeros(len(class_indices))

    diffs = np.nonzero(y[:-1] != y[1:])[0] + 1  # different neighbor classes have value True
    if accuracy > 0:
        diffs = np.array([diffs[i] for i in range(1, len(diffs)) if diffs[i] - diffs[i - 1] > accuracy],
                         dtype=np.int32) if len(diffs) > 15 else diffs
    intervals = np.array((np.concatenate(([0], diffs[:-1])), diffs)).T

    if len(diffs) < 2:
        return None

    max_ig, max_i, max_j = 0, 0, 0
    prior_h = h(right_dist)  # calculate prior entropy

    for i, j in intervals:
        dist = np.bincount(np.concatenate((dummy_class, y[i:j])))[class_indices]
        left_dist += dist
        right_dist -= dist
        coef = np.true_divide((np.sum(left_dist), np.sum(right_dist)), len(y))
        ig = prior_h - np.dot(coef, [h(left_dist[left_dist.nonzero()]),
                                     h(right_dist[right_dist.nonzero()])])
        if ig > max_ig:
            max_ig, max_i, max_j = ig, i, j

    if x[max_i] == x[max_j]:
        ind = x_unique.index(x[max_i])
        mean = np.float32(np.mean((x_unique[1 if ind == 0 else ind - 1], x_unique[ind])))
    else:
        mean = np.float32(np.mean((x[max_i], x[max_j])))

    return float(max_ig), [mean, mean]
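np.bincount(y).nonzero()[0] gives the class labels that actually occur in y, and indexing the counts with it drops the empty classes. A small sketch with an invented label vector:

import numpy as np

y = np.array([0, 2, 2, 5, 2, 0])
counts = np.bincount(y)            # [2 0 3 0 0 1]
class_indices = counts.nonzero()[0]
print(class_indices)               # [0 2 5] -- classes present in y
print(counts[class_indices])       # [2 3 1] -- counts restricted to those classes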