Python numpy.logical_and() Examples

The following are code examples showing how to use numpy.logical_and(). They are taken from open source Python projects.
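
As a quick reference before the examples: numpy.logical_and() computes the element-wise AND of two arrays (broadcasting as needed) and returns a boolean array, which makes it convenient for combining several masks. A minimal sketch of the basic usage (the array values below are made up for illustration):

import numpy as np

values = np.array([0.2, 1.5, 3.0, 4.8, 7.1])

# Combine two conditions element-wise: True where 1.0 <= value <= 5.0.
in_range = np.logical_and(values >= 1.0, values <= 5.0)
print(in_range)          # [False  True  True  True False]
print(values[in_range])  # [1.5 3.  4.8]

# For boolean arrays this is equivalent to the & operator (note the parentheses).
assert np.array_equal(in_range, (values >= 1.0) & (values <= 5.0))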

Example 1
Project: core   Author: lifemapper   File: layer_encoder.py    GNU General Public License v3.0
def _get_presence_absence_method(min_presence, max_presence, min_coverage, 
                                 nodata):
    """Gets the function for determining presence for a data window

    Args:
        min_presence: Data cells must have a value greater than or equal to
            this to be considered present
        max_presence: Data cells must have a value less than or equal to this
            to be considered present
        min_coverage: At least this percentage of the window must be classified
            as present for the window to be considered present
        nodata: This value should be considered nodata
    """
    if min_coverage > 1.0:
        min_coverage = min_coverage / 100.0
    # ...............................
    def get_presence_absence(window):
        min_num = max(min_coverage * window.size, 1)
        valid_cells = np.logical_and(
            np.logical_and(window >= min_presence, window <= max_presence),
            window != nodata)
        return np.sum(valid_cells) >= min_num
    return get_presence_absence

# ............................................................................. 
Example 2
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0
def _update_labels(self, label, crop_box, height, width):
        """Convert labels according to crop box"""
        xmin = float(crop_box[0]) / width
        ymin = float(crop_box[1]) / height
        w = float(crop_box[2]) / width
        h = float(crop_box[3]) / height
        out = label.copy()
        out[:, (1, 3)] -= xmin
        out[:, (2, 4)] -= ymin
        out[:, (1, 3)] /= w
        out[:, (2, 4)] /= h
        out[:, 1:5] = np.maximum(0, out[:, 1:5])
        out[:, 1:5] = np.minimum(1, out[:, 1:5])
        coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:])
        valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
        valid = np.logical_and(valid, coverage > self.min_eject_coverage)
        valid = np.where(valid)[0]
        if valid.size < 1:
            return None
        out = out[valid, :]
        return out 
Example 3
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0
def _parse_label(self, label):
        """Helper function to parse object detection label.

        Format for raw label:
        n \t k \t ... \t [id \t xmin\t ymin \t xmax \t ymax \t ...] \t [repeat]
        where n is the width of header, 2 or larger
        k is the width of each object annotation, can be arbitrary, at least 5
        """
        if isinstance(label, nd.NDArray):
            label = label.asnumpy()
        raw = label.ravel()
        if raw.size < 7:
            raise RuntimeError("Label shape is invalid: " + str(raw.shape))
        header_width = int(raw[0])
        obj_width = int(raw[1])
        if (raw.size - header_width) % obj_width != 0:
            msg = "Label shape %s inconsistent with annotation width %d." \
                %(str(raw.shape), obj_width)
            raise RuntimeError(msg)
        out = np.reshape(raw[header_width:], (-1, obj_width))
        # remove bad ground-truths
        valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
        if valid.size < 1:
            raise RuntimeError('Encounter sample with no valid label.')
        return out[valid, :] 
Example 4
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: accuracy.py    Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.
        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            labels, preds = check_label_shapes(label, pred_label)

            valid = (labels.reshape(-1, 1) != self.ignore_labels).all(axis=-1)

            self.sum_metric += np.logical_and(pred_label.flat == label.flat, valid).sum()
            self.num_inst += np.sum(valid) 
Example 5
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0
def get_hardness_distribution(gtG, max_dist, min_dist, rng, trials, bins, nodes,
                              n_ori, step_size):
  heuristic_fn = lambda node_ids, node_id: \
    heuristic_fn_vec(nodes[node_ids, :], nodes[[node_id], :], n_ori, step_size)
  num_nodes = gtG.num_vertices()
  gt_dists = []; h_dists = [];
  for i in range(trials):
    end_node_id = rng.choice(num_nodes)
    gt_dist = gt.topology.shortest_distance(gt.GraphView(gtG, reversed=True),
                                            source=gtG.vertex(end_node_id),
                                            target=None, max_dist=max_dist)
    gt_dist = np.array(gt_dist.get_array())
    ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0]
    gt_dist = gt_dist[ind]
    h_dist = heuristic_fn(ind, end_node_id)[:,0]
    gt_dists.append(gt_dist)
    h_dists.append(h_dist)
  gt_dists = np.concatenate(gt_dists)
  h_dists = np.concatenate(h_dists)
  hardness = 1. - h_dists*1./gt_dists
  hist, _ = np.histogram(hardness, bins)
  hist = hist.astype(np.float64)
  hist = hist / np.sum(hist)
  return hist 
Example 6
Project: DOTA_models   Author: ringringyi   File: balanced_positive_negative_sampler_test.py    Apache License 2.0
def test_subsample_all_examples(self):
    numpy_labels = np.random.permutation(300)
    indicator = tf.constant(np.ones(300) == 1)
    numpy_labels = (numpy_labels - 200) > 0

    labels = tf.constant(numpy_labels)

    sampler = (balanced_positive_negative_sampler.
               BalancedPositiveNegativeSampler())
    is_sampled = sampler.subsample(indicator, 64, labels)
    with self.test_session() as sess:
      is_sampled = sess.run(is_sampled)
      self.assertTrue(sum(is_sampled) == 64)
      self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 32)
      self.assertTrue(sum(np.logical_and(
          np.logical_not(numpy_labels), is_sampled)) == 32) 
Example 7
Project: SLiPy   Author: glentner   File: Spectrum.py    GNU General Public License v2.0
def __rand__(self, other):
        """
        Logical and, '&' - Right Handed (scalar)

        The comparison operations ( >, <, >=, <=, ==, !=, &, ^, | ) are defined
        to return a binary Spectrum of True and False values. The same convention
        applies as above. Either the LHS or RHS must be contained within the other,
        and the LHS is compared on the overlapping regions. Data outside this
        overlapping region is returned as False.
        """

        result = self.copy()

        # if no units are detected, implicitly assume data units
        if not hasattr(other, 'unit'): other *= result.data.unit

        result.data = np.logical_and(other.to(result.data.unit).value,
            result.data.value) * u.dimensionless_unscaled

        return result 
Example 8
Project: where   Author: kartverket   File: gnss.py    MIT License
def check_satellite_eclipse(dset):
    """Check if a satellite is an eclipse

    TODO: Check if a better algorithm exists (e.g. based on beta angle).

    Args:
        dset(Dataset):    Model data
    """
    cos_gamma = np.einsum(
        "ij,ij->i", mathp.unit_vector(dset.sat_posvel.itrs_pos), dset.sat_posvel.itrs_pos_sun
    )  # TODO:  dot product -> better solution dot() function in mathp
    h = np.linalg.norm(dset.sat_posvel.itrs_pos, axis=1) * np.sqrt(1.0 - cos_gamma ** 2)

    satellites_in_eclipse = list()
    for satellite in dset.unique("satellite"):
        idx = dset.filter(satellite=satellite)
        satellite_eclipse = np.logical_and(cos_gamma[idx] < 0, h[idx] < constant.a)
        if np.any(satellite_eclipse == True):
            satellites_in_eclipse.append(satellite)

    return satellites_in_eclipse 
Example 9
Project: where   Author: kartverket   File: __init__.py    MIT License
def detect_outliers(config_key, dset):
    """Detect all outliers for a given session

    Args:
        config_key (String):  The configuration key listing which detectors to apply.
        dset (Dataset):       Dataset containing analysis data.
    """
    prefix = config.analysis.get("analysis", default="").str
    log.info(f"Detecting outliers")
    keep_idxs = plugins.call_all(package_name=__name__, config_key=config_key, prefix=prefix, dset=dset)

    all_keep_idx = np.ones(dset.num_obs, dtype=bool)
    for detector, detector_keep_idx in keep_idxs.items():
        log.info(f"Detecting {sum(~detector_keep_idx):5d} outliers based on {detector}")
        report.data("detector_data", dset, detector_name=detector, keep_idx=detector_keep_idx)
        all_keep_idx = np.logical_and(all_keep_idx, detector_keep_idx)

    log.info(f"Removing {sum(~all_keep_idx)} of {dset.num_obs} observations")
    return all_keep_idx 
Example 10
Project: where   Author: kartverket   File: ignore_epochs.py    MIT License
def ignore_epochs(dset):
    """Edits data based on data quality

    Args:
        dset:     A Dataset containing model data.

    Returns:
        Array containing False for observations to throw away
    """
    intervals = config.tech[_SECTION].intervals.as_list(split_re=", *")

    keep_idx = np.ones(dset.num_obs, dtype=bool)
    for interval in intervals:
        interval = interval.split()
        start_time = Time(" ".join(interval[-4:-2]), scale="utc", format="iso")
        end_time = Time(" ".join(interval[-2:]), scale="utc", format="iso")
        # station name may contain spaces
        station = " ".join(interval[:-4])

        remove_idx = np.logical_and(start_time < dset.time, dset.time < end_time)
        if len(interval) == 5:
            remove_idx = dset.filter(station=station, idx=remove_idx)
        keep_idx = np.logical_and(keep_idx, np.logical_not(remove_idx))

    return keep_idx 
Example 11
Project: where   Author: kartverket   File: elevation.py    MIT License
def elevation(dset):
    """Edits data based on elevation

    If there is more than one station per observation (i.e. VLBI), all stations need to observe above the threshold
    elevation for the observation to be kept.

    Args:
        dset (Dataset):   A Dataset containing model data.

    Returns:
        numpy.ndarray:    Array containing False for observations to throw away.
    """
    elev_threshold = config.tech[_SECTION].cut_off.float
    keep_idx = np.ones(dset.num_obs, dtype=bool)

    # Avoid rounding errors close to 0 when converting to radians
    if elev_threshold <= 0:
        return keep_idx

    # Check elevation for each station in an observation
    for _ in dset.for_each("site_pos"):
        keep_idx = np.logical_and(keep_idx, dset.site_pos.elevation >= np.radians(elev_threshold))

    return keep_idx 
Example 12
Project: EXOSIMS   Author: dsavransky   File: SurveySimulation.py    BSD 3-Clause "New" or "Revised" License
def is_earthlike(self, plan_inds, sInd):
        """
        Is the planet earthlike?
        """
        TL = self.TargetList
        SU = self.SimulatedUniverse
        PPop = self.PlanetPopulation

        # extract planet and star properties
        Rp_plan = SU.Rp[plan_inds].value
        L_star = TL.L[sInd]
        if PPop.scaleOrbits:
            a_plan = (SU.a[plan_inds]/np.sqrt(L_star)).value
        else:
            a_plan = (SU.a[plan_inds]).value
        # Definition: planet radius (in earth radii) and solar-equivalent luminosity must be
        # between the given bounds.
        Rp_plan_lo = 0.80/np.sqrt(a_plan)
        # We use the numpy versions so that plan_ind can be a numpy vector.
        return np.logical_and(
           np.logical_and(Rp_plan >= Rp_plan_lo, Rp_plan <= 1.4),
           np.logical_and(a_plan  >= 0.95,     a_plan  <= 1.67)) 
Example 13
Project: RSSMOSPipeline   Author: mattyowl   File: RSSMOSTools.py    GNU General Public License v3.0
def maskNoisyData(data, sigmaCut = 3.0):
    """Return a mask with zeros for data with values < sigma*sigmaCut selected. 
    
    """

    mean=0
    sigma=1e6
    for i in range(20):
        lastSigma=sigma
        nonZeroMask=np.not_equal(data, 0)
        mask=np.less(abs(data-mean), sigmaCut*sigma)
        mask=np.logical_and(nonZeroMask, mask)
        mean=np.mean(data[mask])
        sigma=np.std(data[mask])
        if sigma == lastSigma:
            break            
    mask=np.greater(abs(data-mean), sigmaCut*sigma)

    return mask
    
#------------------------------------------------------------------------------------------------------------- 
Example 14
Project: object_detector_app   Author: datitran   File: balanced_positive_negative_sampler_test.py    MIT License
def test_subsample_selection(self):
    # Test random sampling when only some examples can be sampled:
    # 100 samples, 20 positives, 10 positives cannot be sampled
    numpy_labels = np.arange(100)
    numpy_indicator = numpy_labels < 90
    indicator = tf.constant(numpy_indicator)
    numpy_labels = (numpy_labels - 80) >= 0

    labels = tf.constant(numpy_labels)

    sampler = (balanced_positive_negative_sampler.
               BalancedPositiveNegativeSampler())
    is_sampled = sampler.subsample(indicator, 64, labels)
    with self.test_session() as sess:
      is_sampled = sess.run(is_sampled)
      self.assertTrue(sum(is_sampled) == 64)
      self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 10)
      self.assertTrue(sum(np.logical_and(
          np.logical_not(numpy_labels), is_sampled)) == 54)
      self.assertAllEqual(is_sampled, np.logical_and(is_sampled,
                                                     numpy_indicator)) 
Example 15
Project: wise_ils   Author: ElementAI   File: prm.py    Apache License 2.0
def instance_nms(self, instance_list, threshold=0.3, merge_peak_response=True):
        selected_instances = []
        while len(instance_list) > 0:
            instance = instance_list.pop(0)
            selected_instances.append(instance)
            src_mask = instance[2].astype(bool)
            src_peak_response = instance[3]
            def iou_filter(x):
                dst_mask = x[2].astype(bool)
                # IoU
                intersection = np.logical_and(src_mask, dst_mask).sum()
                union = np.logical_or(src_mask, dst_mask).sum()
                iou = intersection / (union + 1e-10)
                if iou < threshold:
                    return x
                else:
                    if merge_peak_response:
                        nonlocal src_peak_response
                        src_peak_response += x[3]
                    return None
            instance_list = list(filter(iou_filter, instance_list))
        return selected_instances 
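
The nested calls in Example 15 (intersection via logical_and, union via logical_or) form a common recipe for the IoU of two boolean masks. A minimal standalone sketch of that pattern, with made-up masks for illustration:

import numpy as np

mask_a = np.array([[1, 1, 0],
                   [0, 1, 0]], dtype=bool)
mask_b = np.array([[1, 0, 0],
                   [0, 1, 1]], dtype=bool)

intersection = np.logical_and(mask_a, mask_b).sum()  # pixels set in both masks
union = np.logical_or(mask_a, mask_b).sum()          # pixels set in either mask
iou = intersection / (union + 1e-10)                 # epsilon guards against empty masks
print(iou)  # ~0.5
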
Example 16
Project: NeuroKit   Author: neuropsychology   File: tests_complexity.py    MIT License
def pyeeg_ap_entropy(X, M, R):
    N = len(X)

    Em = pyeeg_embed_seq(X, 1, M)
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B)  # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R

    # Probability that random M-sequences are in range
    Cm = InRange.mean(axis=0)

    # M+1-sequences in range if M-sequences are in range & last values are close
    Dp = np.abs(
        np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T
    )

    Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)

    Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))

    Ap_En = (Phi_m - Phi_mp) / (N - M)

    return Ap_En 
Example 17
Project: NeuroKit   Author: neuropsychology   File: tests_complexity.py    MIT License
def pyeeg_samp_entropy(X, M, R):
    N = len(X)

    Em = pyeeg_embed_seq(X, 1, M)
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B)  # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R
    np.fill_diagonal(InRange, 0)  # Don't count self-matches

    Cm = InRange.sum(axis=0)  # Probability that random M-sequences are in range
    Dp = np.abs(
        np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T
    )

    Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).sum(axis=0)

    # Avoid taking log(0)
    Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))

    return Samp_En

# =============================================================================
# Entropy
# ============================================================================= 
Example 18
Project: radiometric_normalization   Author: planetlabs   File: display_wrapper.py    Apache License 2.0
def create_pixel_plots(candidate_path, reference_path, base_name,
                       last_band_alpha=False, limits=None, custom_alpha=None):
    c_ds, c_alpha, c_band_count = _open_image_and_get_info(
        candidate_path, last_band_alpha)
    r_ds, r_alpha, r_band_count = _open_image_and_get_info(
        reference_path, last_band_alpha)

    _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count)

    if custom_alpha is not None:
        combined_alpha = custom_alpha
    else:
        combined_alpha = numpy.logical_and(c_alpha, r_alpha)
    valid_pixels = numpy.nonzero(combined_alpha)

    for band_no in range(1, c_band_count + 1):
        c_band = gimage.read_single_band(c_ds, band_no)
        r_band = gimage.read_single_band(r_ds, band_no)
        file_name = '{}_{}.png'.format(base_name, band_no)
        display.plot_pixels(file_name, c_band[valid_pixels],
                            r_band[valid_pixels], limits) 
Example 19
Project: radiometric_normalization   Author: planetlabs   File: display_wrapper.py    Apache License 2.0
def create_all_bands_histograms(candidate_path, reference_path, base_name,
                                last_band_alpha=False,
                                color_order=['b', 'g', 'r', 'y'],
                                x_limits=None, y_limits=None):
    c_gimg = gimage.load(candidate_path, last_band_alpha=last_band_alpha)
    r_gimg = gimage.load(reference_path, last_band_alpha=last_band_alpha)

    gimage.check_comparable([c_gimg, r_gimg])

    combined_alpha = numpy.logical_and(c_gimg.alpha, r_gimg.alpha)
    valid_pixels = numpy.nonzero(combined_alpha)

    file_name = '{}_histograms.png'.format(base_name)
    display.plot_histograms(
        file_name,
        [c_band[valid_pixels] for c_band in c_gimg.bands],
        [r_band[valid_pixels] for r_band in r_gimg.bands],
        color_order, x_limits, y_limits) 
Example 20
Project: DRCOG_Urbansim   Author: apdjustino   File: join_attribute_modification_model.py    GNU Affero General Public License v3.0
def run(self, dataset, secondary_dataset, index=None, attribute_to_be_modified=None, value=0, filter=None, dataset_pool=None):
        """
        'dataset' must contain an attribute of the same name as the id attribute of the secondary_dataset (join_attribute).
        The model finds members of 'dataset' for which the values of the join_attribute correspond to values of that attribute 
        of the secondary_dataset (possibly restricted by the 'index' and/or 'filter', which is an expression). For all those
        members, the attribute 'attribute_to_be_modified' is changed to 'value'. If 'attribute_to_be_modified' is not given,
        the 'join_attribute' is modified. 
        """
        
        if index is None:
            index = arange(secondary_dataset.size())
        if filter is not None:
            members_to_consider = zeros(secondary_dataset.size(), dtype='bool8')
            members_to_consider[index] = True
            pass_filter = secondary_dataset.compute_variables([filter], dataset_pool=dataset_pool) > 0
            members_to_consider = logical_and(members_to_consider, pass_filter)
            index = where(members_to_consider)[0]
        ids = secondary_dataset.get_id_attribute()[index]
        join_attribute = secondary_dataset.get_id_name()[0]
        members_idx = where(ismember(dataset.get_attribute(join_attribute), ids))[0]
        if attribute_to_be_modified is None:
            attribute_to_be_modified = join_attribute
        dataset.modify_attribute(name=attribute_to_be_modified, data=array(index.size*[value]), index=members_idx)
        logger.log_status("%s values of %s.%s are set to %s." % (members_idx.size, dataset.get_dataset_name(), attribute_to_be_modified, value)) 
Example 21
Project: DRCOG_Urbansim   Author: apdjustino   File: misc.py    GNU Affero General Public License v3.0
def remove_elements_with_matched_prefix_from_list(valuelist, prefixlist):
    """Remove all occurences of elements with one of the prefix in the prefixlist
       from valuelist.
    """
    from numpy import array, reshape, compress, apply_along_axis, logical_and

    def match_prefix(prefix, arr):
        def match_single_element(item, prefix):
            q = re.match('^'+prefix, item[0])
            if q:
                return 0
            return 1
        t = apply_along_axis(match_single_element, 1, arr.reshape(arr.size,1), prefix[0])
        return t

    result = valuelist
    pl = array(prefixlist)
    m = apply_along_axis(match_prefix, 1, pl.reshape(pl.size,1), array(valuelist))
    l = logical_and(m[0,:], m[1,:])
    return compress(l, result) 
Example 22
Project: DRCOG_Urbansim   Author: apdjustino   File: abstract_dataset.py    GNU Affero General Public License v3.0
def get_filtered_index(self, filter, threshold=0, index=None, dataset_pool=None, resources=None):
        """Return only those indices of index that pass through the given filter. Filter can be an expression/variable,
        it is computed within this method."""
        self.compute_variables([filter], dataset_pool=dataset_pool, resources=resources)
        name = VariableName(filter)
        filtered_index = self.get_index_where_variable_larger_than_threshold(
                name.get_short_name(), threshold=threshold)
        new_index = zeros((self.size(),), dtype=int32)
        if index is None:
            index = arange(self.size())
        if not isinstance(index,ndarray):
            index=array(index)
        new_index[index]=1
        new_index_tmp = zeros((self.size(),), dtype=int32)
        new_index_tmp[filtered_index]=1
        new_index = logical_and(new_index, new_index_tmp)
        return where(new_index)[0]
    
    ##################################################################################
    ## Methods for joining two datasets
    ################################################################################## 
Example 23
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License
def possibilities_generator(
        prior, min_pos, max_start_pos, constraint_len, total_filled):
    """
    Given a row prior, a min_pos, max_start_pos, and constraint length,
    yield each potential row

    prior is an array of:
        -1 (unknown),
        0 (definitely empty),
        1 (definitely filled)
    """
    prior_filled = np.zeros(len(prior)).astype(bool)
    prior_filled[prior == 1] = True
    prior_empty = np.zeros(len(prior)).astype(bool)
    prior_empty[prior == 0] = True
    for start_pos in range(min_pos, max_start_pos + 1):
        possible = -1 * np.ones(len(prior))
        possible[start_pos:start_pos + constraint_len] = 1
        if start_pos + constraint_len < len(possible):
            possible[start_pos + constraint_len] = 0
        if start_pos > 0:
            possible[start_pos - 1] = 0

        # add in the prior
        possible[np.logical_and(possible == -1, prior == 0)] = 0
        possible[np.logical_and(possible == -1, prior == 1)] = 1

        # if contradiction with prior, continue
        # 1. possible changes prior = 1 to something else
        # 2. possible changes prior = 0 to something else
        # 3. everything is assigned in possible but there are not
        #    enough filled in
        # 4. possible changes nothing about the prior
        if np.any(possible[np.where(prior == 1)[0]] != 1) or \
                np.any(possible[np.where(prior == 0)[0]] != 0) or \
                np.sum(possible == 1) > total_filled or \
                (np.all(possible >= 0) and np.sum(possible == 1) <
                    total_filled) or \
                np.all(prior == possible):
            continue
        yield possible 
Example 24
Project: ddd-utils   Author: inconvergent   File: random.py    MIT License
def random_points_in_rectangle(n, xx, yy, w, h):

  rnd = random((n,2))
  height_mask = logical_and(rnd[:,1]>yy-h*0.5, rnd[:,1]<yy+h*0.5)
  width_mask = logical_and(rnd[:,0]>xx-w*0.5, rnd[:,0]<xx+w*0.5)
  mask = logical_and(height_mask, width_mask)
  return rnd[mask,:] 
Example 25
Project: mmdetection   Author: open-mmlab   File: iou_balanced_neg_sampler.py    Apache License 2.0
def sample_via_interval(self, max_overlaps, full_set, num_expected):
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)

        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                tmp_sampled_set = np.array(tmp_inds, dtype=int)  # np.int was removed in newer NumPy versions
            sampled_inds.append(tmp_sampled_set)

        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])

        return sampled_inds 
Example 26
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0
def _check_valid_label(self, label):
        """Validate label and its shape."""
        if len(label.shape) != 2 or label.shape[1] < 5:
            msg = "Label with shape (1+, 5+) required, %s received." % str(label)
            raise RuntimeError(msg)
        # np.logical_and takes only two input arrays (a third positional argument is `out`),
        # so the three conditions are combined with nested calls.
        valid_label = np.where(np.logical_and(label[:, 0] >= 0,
                                              np.logical_and(label[:, 3] > label[:, 1],
                                                             label[:, 4] > label[:, 2])))[0]
        if valid_label.size < 1:
            raise RuntimeError('Invalid label occurs.') 
Example 27
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0
def convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1.,
                                 oo_cost=1., connectivity=4):
  assert(connectivity == 4 or connectivity == 8)

  sz_x = traversible.shape[1]
  sz_y = traversible.shape[0]
  g, nodes = generate_lattice(sz_x, sz_y)

  # Assign costs.
  edge_wts = g.new_edge_property('float')
  g.edge_properties['wts'] = edge_wts
  wts = np.ones(g.num_edges(), dtype=np.float32)
  edge_wts.get_array()[:] = wts

  if connectivity == 8:
    add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.))

  se = np.array([[int(e.source()), int(e.target())] for e in g.edges()])
  s_xy = nodes[se[:,0]]
  t_xy = nodes[se[:,1]]
  s_t = np.ravel_multi_index((s_xy[:,1], s_xy[:,0]), traversible.shape)
  t_t = np.ravel_multi_index((t_xy[:,1], t_xy[:,0]), traversible.shape)
  s_t = traversible.ravel()[s_t]
  t_t = traversible.ravel()[t_t]

  wts = np.zeros(g.num_edges(), dtype=np.float32)
  wts[np.logical_and(s_t == True, t_t == True)] = ff_cost
  wts[np.logical_and(s_t == False, t_t == False)] = oo_cost
  wts[np.logical_xor(s_t, t_t)] = fo_cost

  edge_wts = g.edge_properties['wts']
  for i, e in enumerate(g.edges()):
    edge_wts[e] = edge_wts[e] * wts[i]
  # d = edge_wts.get_array()*1.
  # edge_wts.get_array()[:] = d*wts 
  return g, nodes 
Example 28
Project: DOTA_models   Author: ringringyi   File: np_box_list_ops.py    Apache License 2.0
def _update_valid_indices_by_removing_high_iou_boxes(
    selected_indices, is_index_valid, intersect_over_union, threshold):
  max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)
  return np.logical_and(is_index_valid, max_iou <= threshold) 
Example 29
Project: DOTA_models   Author: ringringyi   File: minibatch_sampler_test.py    Apache License 2.0
def test_subsample_indicator_when_more_true_elements_than_num_samples(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.constant(np_indicator)
    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 3)
    with self.test_session() as sess:
      samples_out = sess.run(samples)
      self.assertTrue(np.sum(samples_out), 3)
      self.assertAllEqual(samples_out,
                          np.logical_and(samples_out, np_indicator)) 
Example 30
Project: DOTA_models   Author: ringringyi   File: minibatch_sampler_test.py    Apache License 2.0
def test_subsample_when_more_true_elements_than_num_samples_no_shape(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.placeholder(tf.bool)
    feed_dict = {indicator: np_indicator}

    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 3)
    with self.test_session() as sess:
      samples_out = sess.run(samples, feed_dict=feed_dict)
      self.assertTrue(np.sum(samples_out), 3)
      self.assertAllEqual(samples_out,
                          np.logical_and(samples_out, np_indicator)) 
Example 31
Project: DOTA_models   Author: ringringyi   File: minibatch_sampler_test.py    Apache License 2.0
def test_subsample_indicator_when_less_true_elements_than_num_samples(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.constant(np_indicator)
    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 5)
    with self.test_session() as sess:
      samples_out = sess.run(samples)
      self.assertTrue(np.sum(samples_out), 4)
      self.assertAllEqual(samples_out,
                          np.logical_and(samples_out, np_indicator)) 
Example 32
Project: Parallel.GAMIT   Author: demiangomez   File: pyStack.py    GNU General Public License v3.0
def __init__(self, vertices, project, date, rot=True, aligned=False):

        self.project = project
        self.date = date
        self.aligned = aligned
        self.helmert = None
        self.wrms = None
        self.stations_used = None
        self.iterations = None
        self.rot = rot
        # initialize the vertices of the polyhedron
        # self.vertices = [v for v in vertices if v[5] == date.year and v[6] == date.doy]

        self.vertices = vertices[np.logical_and(vertices['yr'] == date.year, vertices['dd'] == date.doy)]
        # sort using network code station code to make sure that intersect (in align) will get the data in the correct
        # order, otherwise the differences in X Y Z don't make sense...
        self.vertices.sort(order='stn')

        if not self.vertices.size:
            raise ValueError('No polyhedron data found for ' + str(date))

        self.rows = self.vertices.shape[0]

        # create the design matrix for this day
        rx = np.array([np.zeros(self.rows), -self.vertices['z'], self.vertices['y']]).transpose() * 1e-9
        ry = np.array([self.vertices['z'], np.zeros(self.rows), -self.vertices['x']]).transpose() * 1e-9
        rz = np.array([-self.vertices['y'], self.vertices['x'], np.zeros(self.rows)]).transpose() * 1e-9

        tx = np.array([np.ones(self.rows), np.zeros(self.rows), np.zeros(self.rows)]).transpose()
        ty = np.array([np.zeros(self.rows), np.ones(self.rows), np.zeros(self.rows)]).transpose()
        tz = np.array([np.zeros(self.rows), np.zeros(self.rows), np.ones(self.rows)]).transpose()

        if rot:
            self.Ax = np.concatenate((rx, tx), axis=1)
            self.Ay = np.concatenate((ry, ty), axis=1)
            self.Az = np.concatenate((rz, tz), axis=1)
        else:
            self.Ax = tx
            self.Ay = ty
            self.Az = tz 
Example 33
Project: Parallel.GAMIT   Author: demiangomez   File: pyETM1.py    GNU General Public License v3.0
def todictionary(self, time_series=False):
        # convert the ETM adjustment into a dictionary
        # optionally, output the whole time series as well

        # start with the parameters
        etm = dict()
        etm['Network'] = self.NetworkCode
        etm['Station'] = self.StationCode
        etm['lat'] = self.soln.lat[0]
        etm['lon'] = self.soln.lon[0]
        etm['Jumps'] = [to_list(jump.p.toDict()) for jump in self.Jumps.table]

        if self.A is not None:

            etm['Polynomial'] = to_list(self.Linear.p.toDict())

            etm['Periodic'] = to_list(self.Periodic.p.toDict())

            etm['rms'] = {'x': self.factor[0], 'y': self.factor[1], 'z': self.factor[2]}

            etm['xyz_covariance'] = self.covar.tolist()

            etm['neu_covariance'] = self.rotate_sig_cov(covar=self.covar).tolist()

        if time_series:
            ts = dict()
            ts['t'] = self.soln.t.tolist()
            ts['x'] = self.soln.x.tolist()
            ts['y'] = self.soln.y.tolist()
            ts['z'] = self.soln.z.tolist()

            if self.A is not None:
                ts['filter'] = np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]).tolist()
            else:
                ts['filter'] = []

            etm['time_series'] = ts

        return etm 
Example 34
Project: Parallel.GAMIT   Author: demiangomez   File: pyETM.py    GNU General Public License v3.0
def save_excluded_soln(self, cnn):

        for date, f, r in zip(self.soln.date, np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]),
                              np.sqrt(np.sum(np.square(self.R), axis=0))):

            if not cnn.query_float('SELECT * FROM gamit_soln_excl WHERE "NetworkCode" = \'%s\' AND '
                                   '"StationCode" = \'%s\' AND "Project" = \'%s\' AND "Year" = %i AND "DOY" = %i'
                                   % (self.NetworkCode, self.StationCode, self.soln.stack_name, date.year, date.doy)) \
                    and not f:

                cnn.query('INSERT INTO gamit_soln_excl ("NetworkCode", "StationCode", "Project", "Year", "DOY", '
                          'residual) VALUES (\'%s\', \'%s\', \'%s\', %i ,%i, %.4f)'
                          % (self.NetworkCode, self.StationCode, self.soln.stack_name, date.year, date.doy, r)) 
Example 35
Project: SLiPy   Author: glentner   File: Spectrum.py    GNU General Public License v2.0
def insert(self, other, kind = 'linear'):
        """
        Given a Spectrum, `other`, contained within the wavelength domain
        of `self`, replace all pixels in the overlapping region with that
        of an interpolation built on `other`. `kind` is passed to interp1d.
        """
        if type(other) is not Spectrum:
            raise SpectrumError('Spectrum.insert() expects an argument of '
            'type Spectrum!')

        if other not in self:
            raise SpectrumError('The domain of the `other` spectrum is not '
            'contained within this spectrum!')

        # interpolant of the `other` spectrum
        f = interp1d(other.wave.to(self.wave.unit), other.data, kind = kind)

        self.data = np.hstack((

            # the lhs (not affected)
            self.data[np.where(self.wave < other.wave[0])],

            # the interpolated pixels covered by `other`
            f(self.wave[np.where(np.logical_and(self.wave >= other.wave[0],
                self.wave <= other.wave[-1]))]) * self.data.unit,

            # the rhs (not affected)
            self.data[np.where(self.wave > other.wave[-1])]

        # the units get messed up, so we have to re-initialize them
        )).value * self.data.unit 
Example 36
Project: SLiPy   Author: glentner   File: Spectrum.py    GNU General Public License v2.0
def __and__(self, other):
        """
        Logical and, '&'

        The comparison operations ( >, <, >=, <=, ==, !=, &, ^, | ) are defined
        to return a binary Spectrum of True and False values. The same convention
        applies as above. Either the LHS or RHS must be contained within the other,
        and the LHS is compared on the overlapping regions. Data outside this
        overlapping region is returned as False.
        """
        if type(other) is Spectrum:

            if ((self.wave.unit == u.pixel or other.wave.unit == u.pixel) and
                len(self) != len(other) ):
                raise SpectrumError('Spectrum comparisons can only work with '
                'pixel units when both wavelength arrays have pixel '
                'units and are of the same length!')

            # resample the data and return `new` spectrum objects
            lhs, rhs = self.__new_pair(other)
            lhs.data = np.logical_and(lhs.data.value,
                rhs.data.to(lhs.data.unit).value) * u.dimensionless_unscaled

            result = self.copy()

            # assume false
            result.data = np.zeros(len(self)) * u.dimensionless_unscaled
            result.insert(lhs)
            return result

        # otherwise, assume we have a scalar (be careful)
        result = self.copy()

        # if no units are detected, implicitly assume data units
        if not hasattr(other, 'unit'): other *= result.data.unit

        result.data = np.logical_and(result.data.value,
            other.to(result.data.unit).value) * u.dimensionless_unscaled
        return result 
Example 37
Project: Old-school-processing   Author: cianfrocco-lab   File: imagefilter.py    MIT License
def _gradient(cs_shape,zeroradius):
	oneradius = min(cs_shape[0]/2.0,cs_shape[1]/2.0)
	a = numpy.indices(cs_shape)
	cut = zeroradius/float(oneradius)
	radii = numpy.hypot(a[0,:]-(cs_shape[0]/2.0-0.5),a[1,:]-(cs_shape[1]/2.0-0.5))/oneradius	
	def _grad(r):
		return (r-cut)/(1-cut)
	g = numpy.piecewise(radii,[radii < cut,numpy.logical_and(radii < 1, radii >=cut),
         radii>=1-cut],[0,_grad,1])
	return g

#========================= 
Example 38
Project: Deformable-ConvNets   Author: guanfuchen   File: mask_transform.py    MIT License
def mask_overlap(box1, box2, mask1, mask2):
    """
    This function calculates the region IoU when the masks are
    inside different boxes
    Returns:
        intersection over union of the two masks
    """
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    if x1 > x2 or y1 > y2:
        return 0
    w = x2 - x1 + 1
    h = y2 - y1 + 1
    # get masks in the intersection part
    start_ya = y1 - box1[1]
    start_xa = x1 - box1[0]
    inter_maska = mask1[start_ya: start_ya + h, start_xa:start_xa + w]

    start_yb = y1 - box2[1]
    start_xb = x1 - box2[0]
    inter_maskb = mask2[start_yb: start_yb + h, start_xb:start_xb + w]

    assert inter_maska.shape == inter_maskb.shape

    inter = np.logical_and(inter_maskb, inter_maska).sum()
    union = mask1.sum() + mask2.sum() - inter
    if union < 1.0:
        return 0
    return float(inter) / float(union) 
Example 39
Project: where   Author: kartverket   File: vtrf.py    MIT License
def _calculate_pos_itrs(self, site):
        """Calculate positions for the given time epochs

        The positions are calculated as simple linear offsets based on the reference epoch.

        Args:
            site (String):    Key saying which site to calculate position for.

        Returns:
            Array:  Positions, one 3-vector for each time epoch.
        """
        station_info = self.data[site]
        ref_epoch = Time(station_info["ref_epoch"], scale="utc")

        pos = np.zeros((self.time.size, 3))
        for pv in station_info["pos_vel"].values():
            idx = np.logical_and(self.time.utc.datetime >= pv["start"], self.time.utc.datetime < pv["end"])
            if idx.size == 1:
                idx = np.array([idx])
            if not any(idx):
                continue
            ref_pos = np.array([pv["STAX"], pv["STAY"], pv["STAZ"]])
            ref_vel = np.array([pv["VELX"], pv["VELY"], pv["VELZ"]])
            interval_years = (self.time - ref_epoch).jd * Unit.day2julian_years
            if isinstance(interval_years, float):
                interval_years = np.array([interval_years])
            pos[idx, :] = ref_pos + interval_years[idx, None] * ref_vel[None, :]

        if self.time.size == 1:
            pos = pos[0, :]
        return pos 
Example 40
Project: where   Author: kartverket   File: slrf.py    MIT License
def _calculate_pos_itrs(self, site):
        """Calculate positions for the given time epochs

        The positions are calculated as simple linear offsets based on the reference epoch.

        Args:
            site (String):    Key saying which site to calculate position for.

        Returns:
            Array:  Positions, one 3-vector for each time epoch.
        """
        station_info = self.data[site]
        ref_epoch = Time(station_info["ref_epoch"], scale="utc")

        pos = np.zeros((self.time.size, 3))
        for pv in station_info["pos_vel"].values():
            idx = np.logical_and(self.time.utc.datetime >= pv["start"], self.time.utc.datetime < pv["end"])
            if idx.size == 1:
                idx = np.array([idx])
            if not any(idx):
                continue
            ref_pos = np.array([pv["STAX"], pv["STAY"], pv["STAZ"]])
            ref_vel = np.array([pv["VELX"], pv["VELY"], pv["VELZ"]])
            interval_years = (self.time - ref_epoch).jd * Unit.day2julian_years
            if isinstance(interval_years, float):
                interval_years = np.array([interval_years])
            pos[idx, :] = ref_pos + interval_years[idx, None] * ref_vel[None, :]

        if self.time.size == 1:
            pos = pos[0, :]
        return pos 
Example 41
Project: where   Author: kartverket   File: dataset.py    MIT License
def filter(self, idx=None, **filters):
        """Filter observations

        Example:
            > dset.filter(station='NYALES20')
            array([True, True, False, ..., False, False, True], dtype=bool)

        Args:
            idx:      Optional, index with already filtered observations
            filters:  Key-value pairs representing field names and values.

        Returns:
            Array of booleans of shape (num_obs, ).
        """
        if idx is None:
            idx = np.ones(self.num_obs, dtype=bool)

        for field, filter_value in filters.items():
            if field in self._fields:
                table = self._fields[field]
                field_idx = self._data[table].filter_field(field, filter_value)
            elif self.default_field_suffix and field + self.default_field_suffix in self._fields:
                table = self._fields[field + self.default_field_suffix]
                field_idx = self._data[table].filter_field(field + self.default_field_suffix, filter_value)
            else:
                field_idx = np.zeros(self.num_obs, dtype=bool)
                for or_field in [f for f in self._fields if f.startswith(field + "_")]:
                    table = self._fields[or_field]
                    field_idx = np.logical_or(field_idx, self._data[table].filter_field(or_field, filter_value))
            idx = np.logical_and(idx, field_idx)
        return idx 
Example 42
Project: where   Author: kartverket   File: vlbi_netcdf.py    MIT License
def _get_data(self, variable):
        variable.set_auto_mask(False)
        if variable.dtype == "S1":
            try:
                values = np.core.defchararray.strip(netCDF4.chartostring(variable[:]))
            except (UnicodeDecodeError, ValueError):
                # TODO: only happened with fields we are ignoring anyway so far
                values = np.core.defchararray.strip(netCDF4.chartostring(variable[:], encoding="bytes"))
        else:
            values = variable[:]

        if hasattr(variable, "REPEAT"):
            if values.ndim < 2:
                values = np.tile(values, variable.REPEAT)
            else:
                values = np.tile(values, variable.REPEAT).reshape(variable.REPEAT, -1)

        if "YMDHM" in variable.name:
            # Make sure year has 4 digits
            idx_add1900 = np.logical_and((values[:, 0] >= 50), (values[:, 0] < 100))
            idx_add2000 = values[:, 0] < 50
            if idx_add1900.any() or idx_add2000.any():
                values[:, 0][idx_add1900] += 1900
                values[:, 0][idx_add2000] += 2000

        return values 
Example 43
Project: where   Author: kartverket   File: gnss_obs_zero.py    MIT License
def gnss_obs_zero(dset):
    """Edits data based on data quality

    NOTE: This is only a workaround and works only if observations from one GNSS are used. If several GNSSs are used,
          it can happen that all observations are deleted, because some observation types are only given for a certain
          GNSS.

    Args:
        dset:     A Dataset containing model data.

    Returns:
        Numpy array:  False for observations to throw away.
    """
    session = dset.dataset_name
    edit_dset = np.full(dset.num_obs, True, dtype=bool)

    # Loop over GNSSs and observation types
    for sys in dset.meta["obstypes"]:
        for obstype in dset.meta["obstypes"][sys]:

            # Exclude Doppler and SNR observations
            if (obstype not in dset.fields) or (obstype[0] in "DS"):
                continue

            # Remove observations with (close to) zero value
            edit_dset = np.logical_and(edit_dset, dset[obstype] > 10000)

    return edit_dset 
Example 44
Project: object_detector_app   Author: datitran   File: per_image_evaluation.py    MIT License
def _remove_invalid_boxes(self, detected_boxes, detected_scores,
                            detected_class_labels):
    valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2],
                                   detected_boxes[:, 1] < detected_boxes[:, 3])
    return (detected_boxes[valid_indices, :], detected_scores[valid_indices],
            detected_class_labels[valid_indices]) 
Example 45
Project: object_detector_app   Author: datitran   File: np_box_list_ops.py    MIT License
def _update_valid_indices_by_removing_high_iou_boxes(
    selected_indices, is_index_valid, intersect_over_union, threshold):
  max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)
  return np.logical_and(is_index_valid, max_iou <= threshold) 
Example 46
Project: object_detector_app   Author: datitran   File: minibatch_sampler_test.py    MIT License
def test_subsample_indicator_when_more_true_elements_than_num_samples(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.constant(np_indicator)
    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 3)
    with self.test_session() as sess:
      samples_out = sess.run(samples)
      self.assertTrue(np.sum(samples_out), 3)
      self.assertAllEqual(samples_out,
                          np.logical_and(samples_out, np_indicator)) 
Example 47
Project: object_detector_app   Author: datitran   File: minibatch_sampler_test.py    MIT License
def test_subsample_when_more_true_elements_than_num_samples_no_shape(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.placeholder(tf.bool)
    feed_dict = {indicator: np_indicator}

    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 3)
    with self.test_session() as sess:
      samples_out = sess.run(samples, feed_dict=feed_dict)
      self.assertTrue(np.sum(samples_out), 3)
      self.assertAllEqual(samples_out,
                          np.logical_and(samples_out, np_indicator)) 
Example 48
Project: object_detector_app   Author: datitran   File: minibatch_sampler_test.py    MIT License
def test_subsample_indicator_when_less_true_elements_than_num_samples(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.constant(np_indicator)
    samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 5)
    with self.test_session() as sess:
      samples_out = sess.run(samples)
      self.assertTrue(np.sum(samples_out), 4)
      self.assertAllEqual(samples_out,
                          np.logical_and(samples_out, np_indicator)) 
Example 49
Project: scanorama   Author: brianhie   File: utils.py    MIT License
def visualize_expr(X, coords, genes, viz_gene, image_suffix='.svg',
                   new_fig=True, size=1, viz_prefix='ve'):
    genes = [ gene.upper() for gene in genes ]
    viz_gene = viz_gene.upper()
    
    if not viz_gene.upper() in genes:
        sys.stderr.write('Warning: Could not find gene {}\n'.format(viz_gene))
        return
    
    image_fname = '{}_{}{}'.format(
        viz_prefix, viz_gene, image_suffix
    )

    # Color based on percentiles.
    x_gene = X[:, list(genes).index(viz_gene)].toarray()
    colors = np.zeros(x_gene.shape)
    n_tiles = 100
    prev_percentile = min(x_gene)
    for i in range(n_tiles):
        q = (i+1) / float(n_tiles) * 100.
        percentile = np.percentile(x_gene, q)
        idx = np.logical_and(prev_percentile <= x_gene,
                             x_gene <= percentile)
        colors[idx] = i
        prev_percentile = percentile

    colors = colors.flatten()

    if new_fig:
        plt.figure()
        plt.title(viz_gene)
    plt.scatter(coords[:, 0], coords[:, 1],
                c=colors, cmap=cm.get_cmap('Reds'), s=size)
    plt.savefig(image_fname, dpi=500) 
Example 50
Project: scanorama   Author: brianhie   File: utils.py    MIT License
def visualize_dropout(X, coords, image_suffix='.svg',
                      new_fig=True, size=1, viz_prefix='dropout'):
    image_fname = '{}{}'.format(
        viz_prefix, image_suffix
    )

    # Color based on percentiles.
    x_gene = np.array(np.sum(X != 0, axis=1))
    colors = np.zeros(x_gene.shape)
    n_tiles = 100
    prev_percentile = min(x_gene)
    for i in range(n_tiles):
        q = (i+1) / float(n_tiles) * 100.
        percentile = np.percentile(x_gene, q)
        idx = np.logical_and(prev_percentile <= x_gene,
                             x_gene <= percentile)
        colors[idx] = i
        prev_percentile = percentile

    colors = colors.flatten()

    if new_fig:
        plt.figure()
        plt.title(viz_prefix)
    plt.scatter(coords[:, 0], coords[:, 1],
                c=colors, cmap=cm.get_cmap('Reds'), s=size)
    plt.savefig(image_fname, dpi=500) 
Example 51
Project: NivLink   Author: nivlab   File: moat.py    MIT License
def make_screen_idx(n_trials, featmap):
    """Sets screen and AoIs for MOAT experiment.
    
    Parameters
    ----------
    n_trials : int
        Number of trials in the experiment. 

    featmap : array, shape (n_trials, n_aois)
        Key for mapping AoIs to cues.

    Returns
    -------
    screen_idx : array, shape(n_trials, 1)
        Vector defining which AoI configuration is present
        on each trial.
    """

    screen_idx = np.zeros((n_trials,1))
    empty_count = np.count_nonzero(featmap == 99, axis = 1)  
    idx_aoi1_filled = featmap[:,0] != 99
    idx_aoi2_filled = featmap[:,1] != 99

    idx_screen_1 = empty_count == 4
    idx_screen_2 = np.logical_and(empty_count == 3, idx_aoi2_filled)
    idx_screen_3 = np.logical_and(empty_count == 3, idx_aoi1_filled)
    idx_screen_4 = empty_count == 2

    screen_idx[idx_screen_1] = 1
    screen_idx[idx_screen_2] = 2
    screen_idx[idx_screen_3] = 3
    screen_idx[idx_screen_4] = 4

    return screen_idx 
Example 52
Project: NivLink   Author: nivlab   File: epochs.py    MIT License
def _align_artifacts(self, artifacts, raw_ix):
        """Re-aligns artifacts (blinks, saccades) from raw to epochs times.
        
        Parameters
        ----------
        artifacts : array, shape=(n_artifacts, 2)
            Blinks or saccades index array from Raw object.
        raw_ix : array, shape=(n_events, 2)
            Events index array computed in Epoching.
            
        Returns
        -------
        artifacts : array, shape=(n_artifacts, 3)
            Artifacts (blinks, saccades) overlapping with events. First column 
            denotes event number, second column denotes artifact onset, and
            third column denotes artifact offset.
        """
            
        ## Broadcast trial onsets/offsets to number of blinks.
        n_events, _, _, n_times = self.data.shape
        onsets  = np.broadcast_to(raw_ix[:,0], (artifacts.shape[0], n_events)).T
        offsets = np.broadcast_to(raw_ix[:,1], (artifacts.shape[0], n_events)).T

        ## Identify where blinks overlap with trials.
        overlap = np.logical_and(onsets < artifacts[:,1], offsets > artifacts[:,0])
        trials, ix = np.where(overlap)

        ## Assemble blinks data. Realign to epoch times.
        artifacts = np.column_stack([trials, artifacts[ix]])
        for i in np.unique(artifacts[:,0]):
            artifacts[artifacts[:,0]==i,1:] -= int(raw_ix[i,0])
            
        ## Boundary correction.
        artifacts = np.where(artifacts < 0, 0, artifacts)
        artifacts = np.where(artifacts > n_times, n_times, artifacts)
        
        return artifacts 
Example 53
Project: DualFisheye   Author: ooterness   File: fisheye.py    MIT License
def get_mask(self, uv_px):
        # Check whether each coordinate is within outer image bounds,
        # and within the illuminated area under the fisheye lens.
        x_mask = np.logical_and(0 <= uv_px[0], uv_px[0] < self.cols)
        y_mask = np.logical_and(0 <= uv_px[1], uv_px[1] < self.rows)
        # Check whether each coordinate is within the illuminated area.
        r_mask = matrix_len(uv_px - self.lens.center_px) < self.lens.radius_px
        # All three checks must pass to be considered visible.
        all_mask = np.logical_and(r_mask, np.logical_and(x_mask, y_mask))
        return np.squeeze(np.asarray(all_mask))

    # Given an 2xN array of UV pixel coordinates, return a weight score
    # that is proportional to the distance from the edge. 
Example 54
Project: DualFisheye   Author: ooterness   File: fisheye.py    MIT License
def _score(self, svec, xyz, wt_pixel, wt_blank):
        # Update lens parameters from state vector.
        self._set_state_vector(svec)
        # Determine masks for each input image.
        uv0 = self.sources[0].get_uv(xyz)
        uv1 = self.sources[1].get_uv(xyz)
        wt0 = self.sources[0].get_weight(uv0) > 0
        wt1 = self.sources[1].get_weight(uv1) > 0
        # Count overlapping pixels.
        ovr_mask = np.logical_and(wt0, wt1)             # Overlapping pixel
        pix_count = np.sum(wt0) + np.sum(wt1)           # Total drawn pixels
        blk_count = np.sum(np.logical_and(~wt0, ~wt1))  # Number of blank pixels
        # Allocate Nx3 or Nx1 "1D" pixel-list (raster-order).
        pcount = max(xyz.shape)
        img1d = np.zeros((pcount, self.clrs), dtype='float32')
        # Render the difference image, overlapping region only.
        self.sources[0].add_pixels(uv0, img1d, 1.0*ovr_mask)
        self.sources[1].add_pixels(uv1, img1d, -1.0*ovr_mask)
        # Sum of squared differences over the overlapping region.
        sum_sqd = np.sum(np.square(img1d))
        # Compute overall score.  (Note: Higher = Better)
        score = sum_sqd + wt_blank * blk_count - wt_pixel * pix_count
        # (Debug) Print status information.
        if self.debug:
            print(str(svec) + ' --> ' + str(score))
        return score


# Tkinter GUI window for loading a fisheye image. 
Example 55
Project: radiometric_normalization   Author: planetlabs   File: pca_filter.py    Apache License 2.0 5 votes vote down vote up
def _pca_filter_single_band(pca, cand_valid, ref_valid, threshold):
    ''' Uses the scikit-learn PCA module to transform the data and filter
    '''
    major_pca_values = _pca_transform_get_only_major_values(
        pca, cand_valid, ref_valid)

    # Filter
    pixels_pass_filter = numpy.logical_and(
        major_pca_values >= (threshold * -1), major_pca_values <= threshold)

    return pixels_pass_filter 
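
The two-sided test above keeps values inside a symmetric band around zero. On a small array of made-up values it is equivalent to a single absolute-value comparison:

import numpy as np

values = np.array([-2.5, -0.4, 0.0, 0.7, 3.1])
threshold = 1.0

# Two-sided test, as written in the example above ...
band_mask = np.logical_and(values >= -threshold, values <= threshold)

# ... which is the same as one absolute-value comparison.
abs_mask = np.abs(values) <= threshold

assert np.array_equal(band_mask, abs_mask)
print(band_mask)   # [False  True  True  True False]
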
Example 56
Project: insightface   Author: deepinsight   File: assign_levels.py    MIT License 5 votes vote down vote up
def compute_assign_targets(rois, threshold):
    rois_area = np.sqrt((rois[:, 2] - rois[:, 0] + 1) * (rois[:, 3] - rois[:, 1] + 1))
    num_rois = np.shape(rois)[0]
    assign_levels = np.zeros(num_rois, dtype=np.uint8)
    for i, stride in enumerate(config.RCNN_FEAT_STRIDE):
        thd = threshold[i]
        idx = np.logical_and(thd[1] <= rois_area, rois_area < thd[0])
        assign_levels[idx] = stride

    assert 0 not in assign_levels, "All rois should be assigned to specified levels."
    return assign_levels 
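
A standalone sketch of the same bucketing idea, with hypothetical strides and (upper, lower) sqrt-area thresholds standing in for config.RCNN_FEAT_STRIDE and the threshold argument:

import numpy as np

# Hypothetical strides and (upper, lower) sqrt-area thresholds per level.
feat_strides = [4, 8, 16, 32]
thresholds = [(112, 0), (224, 112), (448, 224), (np.inf, 448)]

rois_area = np.array([50.0, 150.0, 300.0, 600.0])
assign_levels = np.zeros(rois_area.size, dtype=np.uint8)

for stride, (upper, lower) in zip(feat_strides, thresholds):
    idx = np.logical_and(lower <= rois_area, rois_area < upper)
    assign_levels[idx] = stride

print(assign_levels)   # [ 4  8 16 32]
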
Example 57
Project: DRCOG_Urbansim   Author: apdjustino   File: equation_specification.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def get_coefficient_fixed_values_for_submodel(self, submodel):
        """Return a tuple with two arrays: first one is an array of coefficient names that have fixed values (i.e. <> 0).
        The second array are the corresponding fixed values. The fixed values are considered for given submodel."""
        fixed_values = self.get_fixed_values()
        if fixed_values.size == 0:
            return (array([]), array([]))
        if self.get_submodels().size > 0:
            idx = self.get_submodels() == submodel
        else: 
            idx = ones(fixed_values.size, dtype="bool8")
        idx = logical_and(idx, fixed_values != 0)
        return (self.get_coefficient_names()[idx], fixed_values[idx]) 
Example 58
Project: StanShock   Author: IhmeGroup   File: stanShock.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def __call__(self,Re):
        cf = np.zeros_like(Re)
        laminarIndices = np.logical_and(Re > 0.0, Re <= self.ReCrit)
        cf[laminarIndices] = 16.0 / Re[laminarIndices]
        turbulentIndices = Re > self.ReCrit
        cf[turbulentIndices] = np.interp(Re[turbulentIndices], self.ReTable, self.cfTable)
        if np.any(Re > self.ReMax):
            raise Exception("Error: Reynolds number exceeds the maximum value of %f: skinFriction Table bounds must be adjusted" % (self.ReMax))
        return cf
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
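
A toy version of the same piecewise evaluation, with a Blasius-style power law standing in for the tabulated turbulent friction factor (illustrative constants only, not from the StanShock project):

import numpy as np

Re = np.array([500.0, 1500.0, 5000.0, 2e4])
Re_crit = 2300.0

cf = np.zeros_like(Re)
laminar = np.logical_and(Re > 0.0, Re <= Re_crit)
cf[laminar] = 16.0 / Re[laminar]
turbulent = Re > Re_crit
cf[turbulent] = 0.079 * Re[turbulent] ** -0.25   # illustrative turbulent correlation
print(cf)
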
Example 59
Project: TGC-Designer-Tools   Author: chadrockey   File: infill_image.py    Apache License 2.0 5 votes vote down vote up
def get_binary_mask(cv2_mask):
    if cv2_mask is None:
        return None, None

    # The two reds in MS Paint are
    # Bright Red: 236, 28, 36
    # Dark Red: 136, 0, 27
    # Try to support both here
    red_pixels = np.logical_and(np.logical_and(cv2_mask[:,:,0] < 40, cv2_mask[:,:,1] < 40), cv2_mask[:,:,2] > 130)

    # The blues in Paint3d are:
    # Indigo: 62, 73, 204
    # Turquoise: 0, 168, 243
    blue_pixels = np.logical_and(np.logical_and(cv2_mask[:,:,0] > 200, cv2_mask[:,:,1] < 180), cv2_mask[:,:,2] < 70)

    # Don't infill areas painted in Red
    # Red turns to black; everything else stays white.  Black will not be infilled or sent in the output image
    remove_mask = (255.0*np.ones((cv2_mask.shape[0], cv2_mask.shape[1], 1))).astype('uint8')
    remove_mask[red_pixels] = 0

    # Preserve original terrain for areas marked in Blue
    preserve_mask = (np.zeros((cv2_mask.shape[0], cv2_mask.shape[1], 1))).astype('uint8')
    preserve_mask[blue_pixels] = 255

    return remove_mask, preserve_mask

# Uses scipy griddata to interpolate and "recompute" the terrain data based on only the valid image points
# Seems to produce a smoother and more natural result
# Also allows us to sample in arbitrary sizes 
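
The nested np.logical_and calls in get_binary_mask combine per-channel thresholds into one colour mask. A small sketch on made-up pixels (assuming OpenCV's BGR channel order, as the thresholds above imply) shows the pattern, along with np.logical_and.reduce as an unnested alternative:

import numpy as np

# A 2x2 BGR image: one MS-Paint red pixel, one Paint3D turquoise pixel, white, black.
img = np.array([[[ 36,  28, 236], [243, 168,   0]],
                [[255, 255, 255], [  0,   0,   0]]], dtype=np.uint8)

red_pixels = np.logical_and(np.logical_and(img[:, :, 0] < 40, img[:, :, 1] < 40),
                            img[:, :, 2] > 130)

# np.logical_and.reduce gives the same mask without nesting.
red_pixels2 = np.logical_and.reduce([img[:, :, 0] < 40, img[:, :, 1] < 40, img[:, :, 2] > 130])

assert np.array_equal(red_pixels, red_pixels2)
print(red_pixels)   # only the first pixel is flagged
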
Example 60
Project: prediction-constrained-topic-models   Author: dtak   File: select_best_runs_and_snapshots.py    MIT License 4 votes vote down vote up
def simplify_best_df_and_make_unicode_friendly(
        best_df,
        replacements={'WEIGHT_Y':'λ', '==':'=', "'":""},
        replacements_ascii={'λ':'WEIGHT_Y', '=':''},
        at_best_keys=[
            'LOGPDF_X_PERTOK_AT_BEST_SNAPSHOT',
            'LOGPDF_Y_PERDOC_AT_BEST_SNAPSHOT'],
        ):
    ''' Update legend names to be shorter/unicode friendly.

    Also adds _AT_BEST_SNAPSHOT fields
    '''
    legcolid = best_df.columns.tolist().index('LEGEND_NAME')
    best_df["LEGEND_NAME"] = best_df["LEGEND_NAME"].apply(lambda x: unicode(x))
    best_df["LEGEND_NAME_ASCII"] = best_df["LEGEND_NAME"].apply(lambda x: unicode(x))
    for row_id in range(best_df.shape[0]):
        leg_str = best_df.iloc[row_id, legcolid]
        for before, after in replacements.items():
            leg_str = leg_str.replace(before, after)
        leg_str = ' '.join(leg_str.split())
        best_df.iloc[row_id, legcolid] = leg_str

        # Now make ascii-safe version of each name
        leg_str_ascii = leg_str
        for before, after in replacements_ascii.items():
            leg_str_ascii = leg_str_ascii.replace(before, after)
        best_df.loc[row_id, 'LEGEND_NAME_ASCII'] = (
            ' '.join(leg_str_ascii.encode('ascii', 'ignore').decode('ascii').split())).replace(' ', '_')
        
    at_best_row_mask = best_df.IS_BEST_SNAPSHOT.values > 0
    for leg in np.unique(best_df['_UNIQUE_LEGEND_NAME'].values):
        for split in np.unique(best_df['SPLIT_NAME'].values):
            leg_split_row_mask = np.logical_and(
                best_df._UNIQUE_LEGEND_NAME.values == leg,
                best_df.SPLIT_NAME.values == split)
            best_leg_split_row_mask = np.logical_and(
                at_best_row_mask, leg_split_row_mask)

            assert np.sum(best_leg_split_row_mask) == 1
            assert np.sum(best_leg_split_row_mask) < np.sum(leg_split_row_mask)
            for at_best_key in at_best_keys:
                target_key = at_best_key.replace('_AT_BEST_SNAPSHOT', '')
                best_leg_split_row_id = np.flatnonzero(best_leg_split_row_mask)[0]
                val_at_best = best_df[target_key].values[best_leg_split_row_id]
                best_df.loc[leg_split_row_mask, at_best_key] = val_at_best

    # Verify all row indices are unique
    assert best_df.shape[0] == np.unique(best_df.index.values).size

    return best_df 
Example 61
Project: mmdetection   Author: open-mmlab   File: iou_balanced_neg_sampler.py    Apache License 2.0 4 votes vote down vote up
def _sample_neg(self, assign_result, num_expected, **kwargs):
        neg_inds = torch.nonzero(assign_result.gt_inds == 0)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            # balance sampling for negative samples
            neg_set = set(neg_inds.cpu().numpy())

            if self.floor_thr > 0:
                floor_set = set(
                    np.where(
                        np.logical_and(max_overlaps >= 0,
                                       max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(
                    np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
                # for sampling interval calculation
                self.floor_thr = 0

            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected *
                                            (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(
                        max_overlaps, set(iou_sampling_neg_inds),
                        num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(
                        iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=int)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(
                    floor_neg_inds, num_expected_floor)
            else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
            sampled_inds = np.concatenate(
                (sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(
                assign_result.gt_inds.device)
            return sampled_inds 
Example 62
Project: fuku-ml   Author: fukuball   File: SupportVectorRegression.py    MIT License 4 votes vote down vote up
def train(self):

        if (self.status != 'init'):
            print("Please load train data and init W first.")
            return self.W

        self.status = 'train'

        original_X = self.train_X[:, 1:]

        K = utility.Kernel.kernel_matrix(self, original_X)

        # P = Q, q = p, G = -A, h = -c

        P = cvxopt.matrix(np.bmat([[K, -K], [-K, K]]))
        q = cvxopt.matrix(np.bmat([self.epsilon - self.train_Y, self.epsilon + self.train_Y]).reshape((-1, 1)))
        G = cvxopt.matrix(np.bmat([[-np.eye(2 * self.data_num)], [np.eye(2 * self.data_num)]]))
        h = cvxopt.matrix(np.bmat([[np.zeros((2 * self.data_num, 1))], [self.C * np.ones((2 * self.data_num, 1))]]))
        # A = cvxopt.matrix(np.append(np.ones(self.data_num), -1 * np.ones(self.data_num)), (1, 2*self.data_num))
        # b = cvxopt.matrix(0.0)
        cvxopt.solvers.options['show_progress'] = False
        solution = cvxopt.solvers.qp(P, q, G, h)

        # Lagrange multipliers
        alpha = np.array(solution['x']).reshape((2, -1))
        self.alpha_upper = alpha[0]
        self.alpha_lower = alpha[1]
        self.beta = self.alpha_upper - self.alpha_lower

        sv = abs(self.beta) > 1e-5
        self.sv_index = np.arange(len(self.beta))[sv]
        self.sv_beta = self.beta[sv]
        self.sv_X = original_X[sv]
        self.sv_Y = self.train_Y[sv]

        free_sv_upper = np.logical_and(self.alpha_upper > 1e-5, self.alpha_upper < self.C)
        self.free_sv_index_upper = np.arange(len(self.alpha_upper))[free_sv_upper]
        self.free_sv_alpha_upper = self.alpha_upper[free_sv_upper]
        self.free_sv_X_upper = original_X[free_sv_upper]
        self.free_sv_Y_upper = self.train_Y[free_sv_upper]

        free_sv_lower = np.logical_and(self.alpha_lower > 1e-5, self.alpha_lower < self.C)
        self.free_sv_index_lower = np.arange(len(self.alpha_lower))[free_sv_lower]
        self.free_sv_alpha_lower = self.alpha_lower[free_sv_lower]
        self.free_sv_X_lower = original_X[free_sv_lower]
        self.free_sv_Y_lower = self.train_Y[free_sv_lower]

        short_b_upper = self.free_sv_Y_upper[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_upper[0], self.sv_X)) - self.epsilon
        short_b_lower = self.free_sv_Y_lower[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_lower[0], self.sv_X)) + self.epsilon

        self.sv_avg_b = (short_b_upper + short_b_lower) / 2

        return self.W 
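
The free-support-vector masks above select multipliers strictly inside the box constraint 0 < alpha < C. A minimal sketch with made-up multipliers:

import numpy as np

alpha = np.array([0.0, 1e-7, 0.4, 1.0, 0.9])
C = 1.0

# Free support vectors sit strictly inside the box constraint.
free_sv = np.logical_and(alpha > 1e-5, alpha < C)
print(np.arange(len(alpha))[free_sv])   # [2 4]
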
Example 63
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: bbox.py    Apache License 2.0 4 votes vote down vote up
def crop(bbox, crop_box=None, allow_outside_center=True):
    """Crop bounding boxes according to slice area.

    This method is mainly used with image cropping to ensure bounding boxes fit
    within the cropped image.

    Parameters
    ----------
    bbox : numpy.ndarray
        Numpy.ndarray with shape (N, 4+) where N is the number of bounding boxes.
        The second axis represents attributes of the bounding box.
        Specifically, these are :math:`(x_{min}, y_{min}, x_{max}, y_{max})`,
        we allow additional attributes other than coordinates, which stay intact
        during bounding box transformations.
    crop_box : tuple
        Tuple of length 4. :math:`(x_{min}, y_{min}, width, height)`
    allow_outside_center : bool
        If `False`, remove bounding boxes which have centers outside cropping area.

    Returns
    -------
    numpy.ndarray
        Cropped bounding boxes with shape (M, 4+) where M <= N.
    """
    bbox = bbox.copy()
    if crop_box is None:
        return bbox
    if not len(crop_box) == 4:
        raise ValueError(
            "Invalid crop_box parameter, requires length 4, given {}".format(str(crop_box)))
    if sum([int(c is None) for c in crop_box]) == 4:
        return bbox

    l, t, w, h = crop_box

    left = l if l else 0
    top = t if t else 0
    right = left + (w if w else np.inf)
    bottom = top + (h if h else np.inf)
    crop_bbox = np.array((left, top, right, bottom))

    if allow_outside_center:
        mask = np.ones(bbox.shape[0], dtype=bool)
    else:
        centers = (bbox[:, :2] + bbox[:, 2:4]) / 2
        mask = np.logical_and(crop_bbox[:2] <= centers, centers < crop_bbox[2:]).all(axis=1)

    # transform borders
    bbox[:, :2] = np.maximum(bbox[:, :2], crop_bbox[:2])
    bbox[:, 2:4] = np.minimum(bbox[:, 2:4], crop_bbox[2:4])
    bbox[:, :2] -= crop_bbox[:2]
    bbox[:, 2:4] -= crop_bbox[:2]

    mask = np.logical_and(mask, (bbox[:, :2] < bbox[:, 2:4]).all(axis=1))
    bbox = bbox[mask]
    return bbox 
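
A quick usage sketch for the crop helper defined above (toy box and made-up crop window, not from the original project):

import numpy as np

# One box spanning (10, 10)-(60, 60) plus an extra class-id column.
bbox = np.array([[10., 10., 60., 60., 1.]])

# Keep only the region x in [0, 50), y in [0, 50).
cropped = crop(bbox, crop_box=(0, 0, 50, 50))
print(cropped)   # [[10. 10. 50. 50.  1.]]
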
Example 64
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0 4 votes vote down vote up
def rng_next_goal_rejection_sampling(start_node_ids, batch_size, gtG, rng,
                                     max_dist, min_dist, max_dist_to_compute,
                                     sampling_d, target_d,
                                     nodes, n_ori, step_size, bins, M):
  sample_start_nodes = start_node_ids is None
  dists = []; pred_maps = []; end_node_ids = []; start_node_ids_ = [];
  hardnesss = []; gt_dists = [];
  num_nodes = gtG.num_vertices()
  for i in range(batch_size):
    done = False
    while not done:
      if sample_start_nodes:
        start_node_id = rng.choice(num_nodes)
      else:
        start_node_id = start_node_ids[i]

      gt_dist = gt.topology.shortest_distance(
          gt.GraphView(gtG, reversed=False), source=start_node_id, target=None,
          max_dist=max_dist)
      gt_dist = np.array(gt_dist.get_array())
      ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0]
      ind = rng.permutation(ind)
      gt_dist = gt_dist[ind]*1.
      h_dist = heuristic_fn_vec(nodes[ind, :], nodes[[start_node_id], :],
                                n_ori, step_size)[:,0]
      hardness = 1. - h_dist / gt_dist
      sampled_ind = _rejection_sampling(rng, sampling_d, target_d, bins,
                                        hardness, M)
      if sampled_ind < ind.size:
        # print sampled_ind
        end_node_id = ind[sampled_ind]
        hardness = hardness[sampled_ind]
        gt_dist = gt_dist[sampled_ind]
        done = True

    # Compute distance from end node to all nodes, to return.
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=end_node_id, target=None,
        max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())

    hardnesss.append(hardness); dists.append(dist); pred_maps.append(pred_map);
    start_node_ids_.append(start_node_id); end_node_ids.append(end_node_id);
    gt_dists.append(gt_dist);
    paths = None
  return start_node_ids_, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists 
Example 65
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0 4 votes vote down vote up
def rng_next_goal(start_node_ids, batch_size, gtG, rng, max_dist,
                  max_dist_to_compute, node_room_ids, nodes=None,
                  compute_path=False, dists_from_start_node=None):
  # Compute the distance field from the starting location, and then pick a
  # destination in another room if possible otherwise anywhere outside this
  # room.
  dists = []; pred_maps = []; paths = []; end_node_ids = [];
  for i in range(batch_size):
    room_id = node_room_ids[start_node_ids[i]]
    # Compute distances.
    if dists_from_start_node is None:
      dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=False), source=gtG.vertex(start_node_ids[i]),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
      dist = np.array(dist.get_array())
    else:
      dist = dists_from_start_node[i]

    # Randomly sample nodes which are within max_dist.
    near_ids = dist <= max_dist
    near_ids = near_ids[:, np.newaxis]
    # Check to see if there is a non-negative node which is close enough.
    non_same_room_ids = node_room_ids != room_id
    non_hallway_ids = node_room_ids != -1
    good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids))
    good2_ids = np.logical_and(near_ids, non_hallway_ids)
    good3_ids = near_ids
    if np.any(good1_ids):
      end_node_id = rng.choice(np.where(good1_ids)[0])
    elif np.any(good2_ids):
      end_node_id = rng.choice(np.where(good2_ids)[0])
    elif np.any(good3_ids):
      end_node_id = rng.choice(np.where(good3_ids)[0])
    else:
      logging.error('Did not find any good nodes.')

    # Compute distance to this new goal for doing distance queries.
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())

    dists.append(dist)
    pred_maps.append(pred_map)
    end_node_ids.append(end_node_id)

    path = None
    if compute_path:
      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
    paths.append(path)
  
  return start_node_ids, end_node_ids, dists, pred_maps, paths 
Example 66
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0 4 votes vote down vote up
def rng_room_to_room(batch_size, gtG, rng, max_dist, max_dist_to_compute,
                     node_room_ids, nodes=None, compute_path=False):
  # Sample one of the rooms, compute the distance field. Pick a destination in
  # another room if possible otherwise anywhere outside this room.
  dists = []; pred_maps = []; paths = []; start_node_ids = []; end_node_ids = [];
  room_ids = np.unique(node_room_ids[node_room_ids[:,0] >= 0, 0])
  for i in range(batch_size):
    room_id = rng.choice(room_ids)
    end_node_id = rng.choice(np.where(node_room_ids[:,0] == room_id)[0])
    end_node_ids.append(end_node_id)

    # Compute distances.
    dist, pred_map = gt.topology.shortest_distance(
        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id),
        target=None, max_dist=max_dist_to_compute, pred_map=True)
    dist = np.array(dist.get_array())
    pred_map = np.array(pred_map.get_array())
    dists.append(dist)
    pred_maps.append(pred_map)

    # Randomly sample nodes which are within max_dist.
    near_ids = dist <= max_dist
    near_ids = near_ids[:, np.newaxis]

    # Check to see if there is a non-negative node which is close enough.
    non_same_room_ids = node_room_ids != room_id
    non_hallway_ids = node_room_ids != -1
    good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids))
    good2_ids = np.logical_and(near_ids, non_hallway_ids)
    good3_ids = near_ids
    if np.any(good1_ids):
      start_node_id = rng.choice(np.where(good1_ids)[0])
    elif np.any(good2_ids):
      start_node_id = rng.choice(np.where(good2_ids)[0])
    elif np.any(good3_ids):
      start_node_id = rng.choice(np.where(good3_ids)[0])
    else:
      logging.error('Did not find any good nodes.')

    start_node_ids.append(start_node_id)

    path = None
    if compute_path:
      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
    paths.append(path)

  return start_node_ids, end_node_ids, dists, pred_maps, paths 
Example 67
Project: Parallel.GAMIT   Author: demiangomez   File: pyETM.py    GNU General Public License v3.0 4 votes vote down vote up
def todictionary(self, time_series=False, model=False):
        # convert the ETM adjustment into a dictionary
        # optionally, output the whole time series as well

        L = self.l

        # start with the parameters
        etm = dict()
        etm['Network'] = self.NetworkCode
        etm['Station'] = self.StationCode
        etm['lat'] = self.soln.lat[0]
        etm['lon'] = self.soln.lon[0]
        etm['ref_x'] = self.soln.auto_x[0]
        etm['ref_y'] = self.soln.auto_y[0]
        etm['ref_z'] = self.soln.auto_z[0]
        etm['Jumps'] = [to_list(jump.p.toDict()) for jump in self.Jumps.table]

        if self.A is not None:

            etm['Polynomial'] = to_list(self.Linear.p.toDict())

            etm['Periodic'] = to_list(self.Periodic.p.toDict())

            etm['wrms'] = {'n': self.factor[0], 'e': self.factor[1], 'u': self.factor[2]}

            etm['xyz_covariance'] = self.rotate_sig_cov(covar=self.covar).tolist()

            etm['neu_covariance'] = self.covar.tolist()

        if time_series:
            ts = dict()
            ts['t'] = np.array([self.soln.t.tolist(), self.soln.mjd.tolist()]).transpose().tolist()
            ts['mjd'] = self.soln.mjd.tolist()
            ts['x'] = self.soln.x.tolist()
            ts['y'] = self.soln.y.tolist()
            ts['z'] = self.soln.z.tolist()
            ts['n'] = L[0].tolist()
            ts['e'] = L[1].tolist()
            ts['u'] = L[2].tolist()
            ts['residuals'] = self.R.tolist()
            ts['weights'] = self.P.transpose().tolist()
            ts['model_neu'] = []
            if model:
                if self.A is not None:
                    for i in range(3):
                        ts['model_neu'].append((np.dot(self.As, self.C[i]).tolist()))

            if self.A is not None:
                ts['filter'] = np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]).tolist()
            else:
                ts['filter'] = []

            etm['time_series'] = ts

        return etm 
Example 68
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License 4 votes vote down vote up
def calculateOffsetMap( freq, data, percentileThreshold=60, extrapolate=1.0, freqFactor=10 ):
    ##  If extrapolate is > 1.0 then the curve will be extrapolated.

    sort_inds = freq.argsort()
    _freq = freq[sort_inds]
    _data = data[sort_inds]

    ##  Check to make sure the data is a 4D volume
    if len( _data.shape ) != 4:
        raise ValueError("calculateOffsetMap: The data must be a volume.")

    ##  Calculate the threshold on which to work
    thresh = prctile( _data[-1], percentileThreshold )    

    ##  Set the array of frequency offsets
    minp = zeros(_data[0].shape)

    ##  Find all the coordinates in the data that are above the threshold
    coords = array( np.nonzero( _data[-1] > thresh ) ).transpose()

    pbar = ProgressBar(widgets=['Calc Offset Map ', Percentage(), Bar(), ETA()], maxval=coords.shape[0]).start()

    x, A, w = -1, -1, -1

    #cjinds = logical_and( _freq<inf, _freq!=0 )
    cjinds = nonzero( _freq<inf )[0]
    for ii,coord in enumerate(coords):
        
        ##  Get the data.
        s,r,c = coord
        mm = _data[:,s,r,c]

        x = wassrLL( _freq[cjinds], mm[cjinds] )
        minp[s,r,c] = x

        pbar.update(ii)

    pbar.finish()

    return minp

##  Fix the asymmetry map based on the water offset calculated above 
Example 69
Project: where   Author: kartverket   File: itrf.py    MIT License 4 votes vote down vote up
def _calculate_pos_itrs(self, site):
        """Calculate positions for the given time epochs

        The positions are calculated as simple linear offsets based on the reference epoch. If there is a post-seismic
        deformation model for a station, the motion due to that model is added to the linear velocity model. Care is taken
        to pick out the correct time interval to use.

        Args:
            site (String):    Key saying which site to calculate position for.

        Returns:
            Array:  Positions, one 3-vector for each time epoch.
        """
        station_info = self.data[site]
        ref_epoch = Time(station_info["ref_epoch"], scale="utc")

        pos = np.zeros((self.time.size, 3))
        for pv in station_info["pos_vel"].values():
            idx = np.logical_and(self.time.utc.datetime >= pv["start"], self.time.utc.datetime < pv["end"])
            if self.time.size == 1:
                idx = np.array([idx])
            if not any(idx):
                continue
            ref_pos = np.array([pv["STAX"], pv["STAY"], pv["STAZ"]])
            ref_vel = np.array([pv["VELX"], pv["VELY"], pv["VELZ"]])
            interval_years = (self.time - ref_epoch).jd * Unit.day2julian_years
            if isinstance(interval_years, float):
                interval_years = np.array([interval_years])
            pos[idx, :] = ref_pos + interval_years[idx, None] * ref_vel[None, :]

        # Post-seismic deformations, see Appendix C in :cite:'itrf2014'
        if "psd" in station_info:
            llh = sofa.vectorized_llh(pos)
            psd = station_info["psd"]
            denu = dict(H=np.zeros(self.time.size), E=np.zeros(self.time.size), N=np.zeros(self.time.size))
            for param in psd.values():
                t_0 = Time(param["epoch"], format="datetime", scale="utc")
                delta_t = (self.time - t_0).jd * Unit.day2julian_years
                if isinstance(delta_t, float):
                    delta_t = np.array([delta_t])
                idx = delta_t > 0
                for L in "ENH":
                    aexp = np.array(param.get("AEXP_" + L, list()))
                    texp = np.array(param.get("TEXP_" + L, list()))
                    for a, t in zip(aexp, texp):
                        denu[L][idx] += a * (1 - np.exp(-delta_t[idx] / t))
                    alog = np.array(param.get("ALOG_" + L, list()))
                    tlog = np.array(param.get("TLOG_" + L, list()))
                    for a, t in zip(alog, tlog):
                        denu[L][idx] += a * np.log(1 + delta_t[idx] / t)

            rot = rotation.enu2trs(llh[:, 0], llh[:, 1])
            denu = np.vstack((denu["E"], denu["N"], denu["H"])).T
            dxyz = (rot @ denu[:, :, None])[:, :, 0]
            pos += dxyz

        if self.time.size == 1:
            pos = pos[0, :]
        return pos 
Example 70
Project: where   Author: kartverket   File: vlbi_src_dir.py    MIT License 4 votes vote down vote up
def src_dir(dset):
    """Calculate the partial derivative of the source coordinates

    Args:
        dset:     A Dataset containing model data.

    Returns:
        Tuple: Array of partial derivatives, list of their names, and their unit
    """
    column_names = ["ra", "dec"]
    sources = np.asarray(dset.unique("source"))
    icrf = apriori.get("crf", time=dset.time)

    # Remove sources that should be fixed
    fix_idx = np.zeros(len(sources))

    for group in config.tech[PARAMETER].fix_sources.list:
        fix_idx = np.logical_or(
            [icrf[src].meta[group] if group in icrf[src].meta else src == group for src in sources], fix_idx
        )

    for group in config.tech[PARAMETER].except_sources.list:
        except_idx = np.array([icrf[src].meta[group] if group in icrf[src].meta else src == group for src in sources])
        fix_idx = np.logical_and(np.logical_not(except_idx), fix_idx)

    # Remove sources with few observations
    # TODO redo this type of test after outlier elimination, maybe this should be done in the estimator?
    # Similar test might be useful for stations
    limit = 5
    for idx, src in enumerate(sources):
        src_idx = dset.filter(source=src)
        if np.sum(src_idx) < limit:
            fix_idx[idx] = True
            log.info(f"Radio source {src} has less than {limit} observations. Keeping coordinates fixed.")

    sources = sources[np.logical_not(fix_idx)]

    # Calculate partials
    partials = np.zeros((dset.num_obs, len(sources) * 2))
    zero = np.zeros(dset.num_obs)

    cos_ra = np.cos(dset.src_dir.right_ascension)
    sin_ra = np.sin(dset.src_dir.right_ascension)
    cos_dec = np.cos(dset.src_dir.declination)
    sin_dec = np.sin(dset.src_dir.declination)

    baseline = (dset.site_pos_2.gcrs_pos - dset.site_pos_1.gcrs_pos)[:, :, None]
    dK_dra = np.array([-cos_dec * sin_ra, cos_dec * cos_ra, zero]).T[:, None, :]
    dK_ddec = np.array([-sin_dec * cos_ra, -sin_dec * sin_ra, cos_dec]).T[:, None, :]
    all_partials = np.hstack((-dK_dra @ baseline, -dK_ddec @ baseline))[:, :, 0]

    for idx, src in enumerate(sources):
        src_idx = dset.filter(source=src)
        partials[src_idx, idx * 2 : idx * 2 + 2] = all_partials[src_idx]

    column_names = [s + "_" + name for s in sources for name in column_names]

    return partials, column_names, "meter" 
Example 71
Project: where   Author: kartverket   File: gnss_clean_orbit.py    MIT License 4 votes vote down vote up
def _check_first_epoch_sample_point(dset: "Dataset", precise, epoch_interval):
    """Keep first observation epoch depending on existing precise orbit sample points

    Precise orbit sample points are needed to carry out interpolation of precise orbits for the first observation
    epoch. If no precise orbit sample point is available before the first satellite observation epoch, then this
    epoch will be removed for this satellite.

    Args:
        dset (Dataset):            A Dataset containing model data.
        precise (PreciseOrbit):    Precise orbit object with precise orbit information.
        epoch_interval (float):    Epoch interval of precise orbit sample points

    Returns:
        tuple: Tuple with array containing False for first observations to throw away and indices indicating first
               observation epoch.
    """

    # Get indices for first observation epochs
    first_idx = 0
    first_epoch_idx = np.ones(dset.num_obs, dtype=bool)
    first_epoch_idx = dset.time.gps.mjd == dset.time.gps.mjd[first_idx]

    # Get set with satellite and time entries for getting corresponding precise orbit sample points
    satellites = dset.satellite[first_epoch_idx]
    time = Time(
        val=dset.time.gps.datetime[first_epoch_idx], format="datetime", scale=dset.time.data.scale
    ) - TimeDelta(epoch_interval, format="sec")
    precise_idx = precise._get_nearest_sample_point(satellites, np.full(len(satellites), time))

    # Keep observations epochs, where a precise orbit sample point exists before the first observation epoch
    diff_time = (dset.time.gps.mjd[first_epoch_idx] - precise.dset_edit.time.gps.mjd[precise_idx]) * Unit.day2second
    keep_idx = np.logical_and(diff_time < (epoch_interval + 1), diff_time > 0)

    removed_entries = "DEBUG: ".join(
        [
            f"{s} {t.strftime('  %Y-%m-%d %H:%M:%S (GPS)')}, dt = {dt:8.2f} s (0 < dt < {epoch_interval + 1})\n"
            for s, t, dt in zip(
                satellites[np.logical_not(keep_idx)],
                dset.time.gps.datetime[first_epoch_idx][np.logical_not(keep_idx)],
                diff_time[np.logical_not(keep_idx)],
            )
        ]
    )
    log.info(f"Following first epoch entries are removed: \nDEBUG: {removed_entries}")

    return keep_idx, first_epoch_idx 
Example 72
Project: where   Author: kartverket   File: gnss_clean_orbit.py    MIT License 4 votes vote down vote up
def _check_last_epoch_sample_point(dset, precise, epoch_interval):
    """Keep last observation epoch depending on existing precise orbit sample points

    Precise orbit sample points are needed to carry out interpolation of precise orbits for the last observation
    epochs. If no precise orbit sample point is available after the last satellite observation epochs, then these
    epochs will be removed for this satellite.

    The time difference between the last observation epochs and the next precise orbit sample point is determined. 'Last
    observation epoch' + 'sampling rate' is chosen as reference time for the selection of the nearest orbit sample
    point, which normally corresponds to 0:00 GPS time. If the time difference falls outside the following interval, then the
    observation epochs are rejected:
                       -(precise orbit epoch interval + 1) < time difference < 0

    Args:
        dset (Dataset):            A Dataset containing model data.
        precise (PreciseOrbit):    Precise orbit object with precise orbit information.
        epoch_interval (float):    Epoch interval of precise orbit sample points

    Returns:
        tuple: Tuple with array containing False for last observations to throw away and indices indicating last
               observation epoch.
    """
    sampling_rate = config.tech.sampling_rate.float

    # Get indices for last observation epochs
    last_idx = -1
    last_epoch_idx = np.ones(dset.num_obs, dtype=bool)
    last_epoch_idx = (
        dset.time.gps.mjd
        >= dset.time.gps.mjd[last_idx] - (epoch_interval - sampling_rate) * Unit.second2day
    )

    # Get set with satellite and time entries for getting corresponding precise orbit sample points
    # Note: Sample point reference time is 'last observation epoch' + 'sampling rate', which corresponds normally to
    #       0:00 GPS time.
    satellites = dset.satellite[last_epoch_idx]
    time = Time(val=dset.time.gps.datetime[last_idx], format="datetime", scale=dset.time.data.scale) + TimeDelta(
        sampling_rate, format="sec"
    )
    precise_idx = precise._get_nearest_sample_point(satellites, np.full(len(satellites), time))

    # Keep observations epochs, where a precise orbit sample point exists after the last observation epoch
    diff_time = (dset.time.gps.mjd[last_epoch_idx] - precise.dset_edit.time.gps.mjd[precise_idx]) * Unit.day2second
    keep_idx = np.logical_and(diff_time > -(epoch_interval + 1), diff_time < 0)

    removed_entries = "DEBUG: ".join(
        [
            f"{s} {t.strftime('  %Y-%m-%d %H:%M:%S (GPS)')}, dt = {dt:8.2f} s ({-(epoch_interval + 1)} < dt < 0)\n"
            for s, t, dt in zip(
                satellites[np.logical_not(keep_idx)],
                dset.time.gps.datetime[last_epoch_idx][np.logical_not(keep_idx)],
                diff_time[np.logical_not(keep_idx)],
            )
        ]
    )
    log.info(f"Following last epoch entries are removed: \nDEBUG: {removed_entries}")

    return keep_idx, last_epoch_idx 
Example 73
Project: EXOSIMS   Author: dsavransky   File: tieredScheduler_sotoSS.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def promote_coro_targets(self, occ_sInds, sInds):
        """
        Determines which coronagraph targets to promote to occulter targets
        Args:
            occ_sInds (numpy array):
                occulter targets
            sInds (numpy array):
                coronagraph targets
        Returns:
            occ_sInds (numpy array):
                updated occulter targets
        """
        TK = self.TimeKeeping
        SU = self.SimulatedUniverse
        TL = self.TargetList
        promoted_occ_sInds = np.array([], dtype=int)

        # if phase 1 has ended
        if TK.currentTimeAbs > self.phase1_end:
            if self.is_phase1 is True:
                self.vprint( 'Entering detection phase 2: target list for occulter expanded')
                self.is_phase1 = False
            # If we only want to promote stars that have planets in the habitable zone
            if self.promote_hz_stars:
                # stars must have had > n_det_min detections
                promote_stars = sInds[np.where(self.sInd_detcounts[sInds] > self.n_det_min)[0]]
                if np.any(promote_stars):
                    for sInd in promote_stars:
                        pInds = np.where(SU.plan2star == sInd)[0]
                        sp = SU.s[pInds]
                        Ms = TL.MsTrue[sInd]
                        Mp = SU.Mp[pInds]
                        mu = const.G*(Mp + Ms)
                        T = (2.*np.pi*np.sqrt(sp**3/mu)).to('d')
                        # star must have detections that span longer than half a period and be in the habitable zone
                        # and have a smaller radius than a sub-Neptune
                        is_earthlike = np.logical_and(
                                          np.logical_and(
                                            (SU.a[pInds] > .95*u.AU), (SU.a[pInds] < 1.67*u.AU)),
                                          (SU.Rp.value[pInds] < 1.75))
                        if (np.any((T/2.0 < (self.sInd_dettimes[sInd][-1] - self.sInd_dettimes[sInd][0]))) 
                          and np.any(is_earthlike)):
                            earthlikes = pInds[np.where(is_earthlike)[0]]
                            self.earth_candidates = np.union1d(self.earth_candidates, earthlikes).astype(int)
                            promoted_occ_sInds = np.append(promoted_occ_sInds, sInd)
                            if sInd not in self.promoted_stars:
                                self.promoted_stars.append(sInd)
                occ_sInds = np.union1d(occ_sInds, promoted_occ_sInds)
            else:
                occ_sInds = np.union1d(occ_sInds, sInds[np.where((self.starVisits[sInds] == self.nVisitsMax) & 
                                                                 (self.occ_starVisits[sInds] == 0))[0]])
        occ_sInds = np.union1d(occ_sInds, np.intersect1d(sInds, self.known_rocky))
        self.promoted_stars = list(np.union1d(self.promoted_stars, np.intersect1d(sInds, self.known_rocky)).astype(int))
        return occ_sInds.astype(int)
Example 74
Project: RSSMOSPipeline   Author: mattyowl   File: fixRefModel.py    GNU General Public License v3.0 4 votes vote down vote up
def detectLines(data, sigmaCut = 3.0, thresholdSigma = 2.0, featureMinPix = 5):
    """Detect lines in a 2d (or 1d) arc spectrum. If 2d, uses the central row of the 2d spectrum only.
    
    Returns: featureTable, segmentationMap
    
    NOTE: this should match that in rss_mos_reducer.py - we should make a module
    
    """
    
    # Detect arc lines
    mean=0
    sigma=1e6
    for i in range(20):
        nonZeroMask=np.not_equal(data, 0)
        mask=np.less(abs(data-mean), sigmaCut*sigma)
        mask=np.logical_and(nonZeroMask, mask)
        mean=np.mean(data[mask])
        sigma=np.std(data[mask])
    detectionThreshold=thresholdSigma*sigma
    mask=np.greater(data-mean, detectionThreshold)

    # Get feature positions, number of pixels etc.
    # Find features in 2d, match to wavelength coord in centre row
    segmentationMap, numObjects=ndimage.label(mask)
    sigPixMask=np.equal(mask, 1)
    objIDs=np.unique(segmentationMap)
    objNumPix=ndimage.sum(sigPixMask, labels = segmentationMap, index = objIDs)
    if data.ndim == 2:
        objPositions_centreRow=ndimage.center_of_mass(data[data.shape[0]//2], labels = segmentationMap, index = objIDs)
    elif data.ndim == 1:
        # ndimage.center_of_mass can be led astray... just use local maximum
        #objPositions_centreRow=ndimage.center_of_mass(data, labels = segmentationMap, index = objIDs)
        objPositions_centreRow=ndimage.maximum_position(data, labels = segmentationMap, index = objIDs)
        objAmplitudes_centreRow=ndimage.maximum(data, labels = segmentationMap, index = objIDs)

    objPositions_centreRow=np.array(objPositions_centreRow).flatten()
    objAmplitudes_centreRow=np.array(objAmplitudes_centreRow).flatten()
    minPixMask=np.greater(objNumPix, featureMinPix)
    featureTable=atpy.Table()
    featureTable.add_column('id', objIDs[minPixMask])
    featureTable.add_column('x_centreRow', objPositions_centreRow[minPixMask])
    if data.ndim == 2:
        featureTable.add_column('y_centreRow', [data.shape[0]//2]*len(featureTable))
        featureTable.add_column('amplitude', data[data.shape[0]//2, np.array(np.round(featureTable['x_centreRow']), dtype = int)])
    elif data.ndim == 1:
        featureTable.add_column('amplitude', objAmplitudes_centreRow[minPixMask])

    # Sanity check plot
    #plt.matplotlib.interactive(True)
    #plt.figure(figsize=(12, 8))
    #plt.plot(data, 'k-')
    #plt.plot(featureTable['x_centreRow'], featureTable['amplitude'], 'bo')
    #for row in featureTable:
        #plt.text(row['x_centreRow'], row['amplitude'], "line")
    #plt.xlabel("x")
    #plt.ylabel("Relative Flux")
    
    return featureTable, segmentationMap

#------------------------------------------------------------------------------------------------------------- 
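
The sigma-clipping loop at the top of detectLines can be exercised on synthetic data. A sketch with made-up numbers shows how np.logical_and keeps only non-zero, in-range values while the background level and noise are re-estimated:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(0.0, 1.0, 2000)
data[:20] += 50.0          # inject a few bright "lines"

mean, sigma, sigma_cut = 0.0, 1e6, 3.0
for _ in range(20):
    non_zero = np.not_equal(data, 0)
    in_range = np.less(np.abs(data - mean), sigma_cut * sigma)
    keep = np.logical_and(non_zero, in_range)
    mean = np.mean(data[keep])
    sigma = np.std(data[keep])

detection_mask = np.greater(data - mean, 2.0 * sigma)
print(detection_mask.sum())   # the 20 injected lines plus a tail of noise peaks
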
Example 75
Project: draco   Author: radiocosmology   File: flagging.py    MIT License 4 votes vote down vote up
def process(self, sstream):
        """Apply a day time mask.

        Parameters
        ----------
        sstream : containers.SiderealStream
            Unmasked sidereal stack.

        Returns
        -------
        mstream : containers.SiderealStream
            Masked sidereal stream.
        """

        sstream.redistribute("freq")

        ra_shift = (sstream.ra[:] - self.start) % 360.0
        end_shift = (self.end - self.start) % 360.0

        # Crudely mask the on and off regions
        mask_bool = ra_shift > end_shift

        # Put in the transition at the start of the day
        mask = np.where(
            ra_shift < self.width,
            0.5 * (1 + np.cos(np.pi * (ra_shift / self.width))),
            mask_bool,
        )

        # Put the transition at the end of the day
        mask = np.where(
            np.logical_and(ra_shift > end_shift - self.width, ra_shift <= end_shift),
            0.5 * (1 + np.cos(np.pi * ((ra_shift - end_shift) / self.width))),
            mask,
        )

        if self.remove_average:
            # Estimate the mean level from unmasked data
            nanvis = (
                sstream.vis[:]
                * np.where(mask_bool, 1.0, np.nan)[np.newaxis, np.newaxis, :]
            )
            average = np.nanmedian(nanvis, axis=-1)[:, :, np.newaxis]
            sstream.vis[:] -= average

        # Apply the mask to the data
        if self.zero_data:
            sstream.vis[:] *= mask

        # Modify the noise weights
        sstream.weight[:] *= mask ** 2

        return sstream 
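
A standalone sketch of the raised-cosine transition applied at the end of the masked interval, on a coarse RA grid with made-up start/end/width values:

import numpy as np

ra = np.linspace(0.0, 360.0, 8, endpoint=False)
start, end, width = 90.0, 270.0, 90.0

ra_shift = (ra - start) % 360.0
end_shift = (end - start) % 360.0

# 1 outside the masked interval, 0 inside it ...
mask = (ra_shift > end_shift).astype(float)

# ... with a smooth cosine taper over the last `width` degrees of the interval.
taper = np.logical_and(ra_shift > end_shift - width, ra_shift <= end_shift)
mask[taper] = 0.5 * (1 + np.cos(np.pi * (ra_shift[taper] - end_shift) / width))
print(np.round(mask, 2))   # 0 inside the interval, 1 outside, 0.5 at the transition point
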
Example 76
Project: pmda   Author: MDAnalysis   File: hbond_analysis.py    GNU General Public License v2.0 4 votes vote down vote up
def guess_hydrogens(self, selection='all', max_mass=1.1, min_charge=0.3):
        """Guesses which hydrogen atoms should be used in the analysis.

        Parameters
        ----------
        selection: str (optional)
            Selection string for atom group from which hydrogens will be
            identified.
        max_mass: float (optional)
            Maximum allowed mass of a hydrogen atom.
        min_charge: float (optional)
            Minimum allowed charge of a hydrogen atom.

        Returns
        -------
        potential_hydrogens: str
            String containing the :attr:`resname` and :attr:`name` of all
            hydrogen atoms potentially capable of forming hydrogen bonds.

        Notes
        -----
        This function makes use of atomic masses and atomic charges to
        identify which atoms are hydrogen atoms that are capable of
        participating in hydrogen bonding. If an atom has a mass less than
        :attr:`max_mass` and an atomic charge greater than :attr:`min_charge`
        then it is considered capable of participating in hydrogen bonds.

        If :attr:`hydrogens_sel` is `None`, this function is called to guess
        the selection.

        Alternatively, this function may be used to quickly generate a
        :class:`str` of potential hydrogen atoms involved in hydrogen bonding.
        This str may then be modified before being used to set the attribute
        :attr:`hydrogens_sel`.
        """

        u = self._universe()
        ag = u.select_atoms(selection)
        hydrogens_ag = ag[
            np.logical_and(
                ag.masses < max_mass,
                ag.charges > min_charge
            )
        ]

        hydrogens_list = np.unique(
            [
                '(resname {} and name {})'.format(r, p)
                for r, p in zip(hydrogens_ag.resnames, hydrogens_ag.names)
            ]
        )

        return " or ".join(hydrogens_list) 
Example 77
Project: insightface   Author: deepinsight   File: rcnn.py    MIT License 4 votes vote down vote up
def get_fpn_rcnn_testbatch(roidb):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped'] + ['boxes']
    :return: data, label, im_info
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    im_rois = roidb[0]['boxes']
    rois = im_rois

    # assign rois
    rois_area = np.sqrt((rois[:, 2] - rois[:, 0]) * (rois[:, 3] - rois[:, 1]))
    area_threshold = {'P5': 448, 'P4': 224, 'P3': 112}
    rois_p5 = rois[area_threshold['P5'] <= rois_area]
    rois_p4 = rois[np.logical_and(area_threshold['P4'] <= rois_area, rois_area < area_threshold['P5'])]
    rois_p3 = rois[np.logical_and(area_threshold['P3'] <= rois_area, rois_area < area_threshold['P4'])]
    rois_p2 = rois[np.logical_and(0 < rois_area, rois_area < area_threshold['P3'])]

    # pad a virtual roi if no rois were assigned
    if rois_p5.size == 0:
        rois_p5 = np.array([[12,34,56,78]])
    if rois_p4.size == 0:
        rois_p4 = np.array([[12,34,56,78]])
    if rois_p3.size == 0:
        rois_p3 = np.array([[12,34,56,78]])
    if rois_p2.size == 0:
        rois_p2 = np.array([[12,34,56,78]])

    p5_batch_index = 0 * np.ones((rois_p5.shape[0], 1))
    rois_p5_array = np.hstack((p5_batch_index, rois_p5))[np.newaxis, :]

    p4_batch_index = 0 * np.ones((rois_p4.shape[0], 1))
    rois_p4_array = np.hstack((p4_batch_index, rois_p4))[np.newaxis, :]

    p3_batch_index = 0 * np.ones((rois_p3.shape[0], 1))
    rois_p3_array = np.hstack((p3_batch_index, rois_p3))[np.newaxis, :]

    p2_batch_index = 0 * np.ones((rois_p2.shape[0], 1))
    rois_p2_array = np.hstack((p2_batch_index, rois_p2))[np.newaxis, :]

    data = {'data': im_array,
            'rois_stride32': rois_p5_array,
            'rois_stride16': rois_p4_array,
            'rois_stride8': rois_p3_array,
            'rois_stride4': rois_p2_array}
    label = {}

    return data, label, im_info 
Example 78
Project: hand-detection.PyTorch   Author: zllrunning   File: data_augment.py    MIT License 4 votes vote down vote up
def _crop(image, boxes, labels, img_dim):
    height, width, _ = image.shape
    pad_image_flag = True

    for _ in range(250):
        if random.uniform(0, 1) <= 0.2:
            scale = 1
        else:
            scale = random.uniform(0.3, 1.)
        short_side = min(width, height)
        w = int(scale * short_side)
        h = w

        if width == w:
            l = 0
        else:
            l = random.randrange(width - w)
        if height == h:
            t = 0
        else:
            t = random.randrange(height - h)
        roi = np.array((l, t, l + w, t + h))

        value = matrix_iof(boxes, roi[np.newaxis])
        flag = (value >= 1)
        if not flag.any():
            continue

        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
        boxes_t = boxes[mask_a].copy()
        labels_t = labels[mask_a].copy()

        # ignore tiny faces
        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim
        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim
        mask_b = np.minimum(b_w_t, b_h_t) > 16.0
        boxes_t = boxes_t[mask_b]
        labels_t = labels_t[mask_b]

        if boxes_t.shape[0] == 0:
            continue

        image_t = image[roi[1]:roi[3], roi[0]:roi[2]]

        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
        boxes_t[:, :2] -= roi[:2]
        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
        boxes_t[:, 2:] -= roi[:2]

        pad_image_flag = False

        return image_t, boxes_t, labels_t, pad_image_flag
    return image, boxes, labels, pad_image_flag 
Example 79
Project: smote_variants   Author: gykovacs   File: _smote_variants.py    MIT License 4 votes vote down vote up
def sample(self, X, y):
        """
        Does the sample generation according to the class parameters.
        
        Args:
            X (np.ndarray): training set
            y (np.array): target labels
            
        Returns:
            (np.ndarray, np.array): the extended training set and target labels
        """
        _logger.info(self.__class__.__name__ + ": " +"Running sampling via %s" % self.descriptor())
        
        self.class_label_statistics(X, y)
        
        if self.class_stats[self.minority_label] < 2:
            _logger.warning(self.__class__.__name__ + ": " + "The number of minority samples (%d) is not enough for sampling" % self.class_stats[self.minority_label])
            return X.copy(), y.copy()
        
        # determine the number of samples to generate
        num_to_sample= self.number_of_instances_to_sample(self.proportion, self.class_stats[self.majority_label], self.class_stats[self.minority_label])
        
        if num_to_sample == 0:
            _logger.warning(self.__class__.__name__ + ": " + "Sampling is not needed")
            return X.copy(), y.copy()
        
        # training and in-sample prediction (out-of-sample by k-fold cross validation might be better)
        predictions= []
        for e in self.ensemble:
            predictions.append(e.fit(X,y).predict(X))
        
        # constructing ensemble prediction
        pred= np.where(np.sum(np.vstack(predictions), axis= 0) > len(self.ensemble)/2, 1, 0)
        
        # create mask of minority samples to sample
        mask_to_sample= np.where(np.logical_and(np.logical_not(np.equal(pred, y)), y == self.minority_label))[0]
        if len(mask_to_sample) < 2:
            _logger.warning(self.__class__.__name__ + ": " +"Not enough minority samples selected %d" % len(mask_to_sample))
            return X.copy(), y.copy()
        
        X_min= X[y == self.minority_label]
        X_min_to_sample= X[mask_to_sample]
        
        # fitting nearest neighbors model for sampling
        nn= NearestNeighbors(n_neighbors= min([len(X_min), self.n_neighbors + 1]), n_jobs= self.n_jobs)
        nn.fit(X_min)
        dist, ind= nn.kneighbors(X_min_to_sample)
        
        # doing the sampling
        samples= []
        while len(samples) < num_to_sample:
            idx= self.random_state.randint(len(X_min_to_sample))
            mean= np.mean(X_min[ind[idx][1:]], axis= 0)
            samples.append(self.sample_between_points(X_min_to_sample[idx], mean))
        
        return np.vstack([X, np.vstack([samples])]), np.hstack([y, np.repeat(self.minority_label, len(samples))]) 
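
The mask_to_sample construction combines two conditions: the ensemble prediction disagrees with the label, and the label is the minority class. A minimal sketch with made-up labels and predictions:

import numpy as np

y    = np.array([0, 1, 1, 0, 1, 1])
pred = np.array([0, 0, 1, 1, 0, 1])
minority_label = 1

# Minority samples that the ensemble got wrong are the ones selected for oversampling.
mask_to_sample = np.where(np.logical_and(np.logical_not(np.equal(pred, y)),
                                         y == minority_label))[0]
print(mask_to_sample)   # [1 4]
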
Example 80
Project: smote_variants   Author: gykovacs   File: _smote_variants.py    MIT License 4 votes vote down vote up
def sample(self, X, y):
        """
        Does the sample generation according to the class parameters.
        
        Args:
            X (np.ndarray): training set
            y (np.array): target labels
            
        Returns:
            (np.ndarray, np.array): the extended training set and target labels
        """
        _logger.info(self.__class__.__name__ + ": " +"Running sampling via %s" % self.descriptor())
        
        self.class_label_statistics(X, y)
        
        if self.class_stats[self.minority_label] < 2:
            _logger.warning(self.__class__.__name__ + ": " + "The number of minority samples (%d) is not enough for sampling" % self.class_stats[self.minority_label])
            return X.copy(), y.copy()
        
        # do SMOTE sampling
        X_samp, y_samp= SMOTE(self.proportion, self.n_neighbors, n_jobs= self.n_jobs, random_state= self.random_state).sample(X, y)
        
        n_folds= min([self.n_folds, np.sum(y == self.minority_label)])
        
        condition= 0
        while True:
            # validating the sampled dataset
            validator= StratifiedKFold(n_folds)
            predictions= []
            for train_index, _ in validator.split(X_samp, y_samp):
                self.classifier.fit(X_samp[train_index], y_samp[train_index])
                predictions.append(self.classifier.predict(X_samp))
            
            # do decision based on one of the voting schemes
            if self.voting == 'majority':
                pred_votes= (np.mean(predictions, axis= 0) > 0.5).astype(int)
                to_remove= np.where(np.not_equal(pred_votes, y_samp))[0]
            elif self.voting == 'consensus':
                pred_votes= (np.mean(predictions, axis= 0) > 0.5).astype(int)
                sum_votes= np.sum(predictions, axis= 0)
                to_remove= np.where(np.logical_and(np.not_equal(pred_votes, y_samp), np.equal(sum_votes, self.n_folds)))[0]
            else:
                raise ValueError(self.__class__.__name__ + ": " + 'Voting scheme %s is not implemented' % self.voting)
            
            # delete samples incorrectly classified
            _logger.info(self.__class__.__name__ + ": " +'Removing %d elements' % len(to_remove))
            X_samp= np.delete(X_samp, to_remove, axis= 0)
            y_samp= np.delete(y_samp, to_remove)
            
            # quit once the number of removed samples has stayed below the threshold for k consecutive iterations
            if len(to_remove) < len(X_samp)*self.p:
                condition= condition + 1
            else:
                condition= 0
            if condition >= self.k:
                break
            
        return X_samp, y_samp