Python itertools.compress() Examples

The following are 29 code examples of itertools.compress(), taken from open source projects; the original project and source file are noted above each example. You may also want to check out all available functions/classes of the module itertools, or try the search function.
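As a quick refresher before the examples: compress(data, selectors) yields the items of data whose corresponding entry in selectors is truthy, and stops as soon as either iterable is exhausted.

from itertools import compress

letters = ['a', 'b', 'c', 'd', 'e']
mask = [1, 0, 1, 0]  # shorter than letters: compress stops at the shorter input

# Only 'a' and 'c' survive; 'e' is dropped because mask has no fifth entry
print(list(compress(letters, mask)))  # ['a', 'c']
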
Example #1
Source File: setup.py    From slurm-gcp with Apache License 2.0
def mount_nfs_vols():

    mount_paths = (
        (HOME_DIR, EXTERNAL_MOUNT_HOME),
        (APPS_DIR, EXTERNAL_MOUNT_APPS),
        (MUNGE_DIR, EXTERNAL_MOUNT_MUNGE),
    )
    # compress yields values from the first arg that are matched with True
    # in the second arg. The result is the paths filtered by the booleans.
    # For non-controller instances, all three are always external nfs
    for path in it.compress(*zip(*mount_paths)):
        while not os.path.ismount(path):
            log.info(f"Waiting for {path} to be mounted")
            util.run(f"mount {path}", wait=5)
    util.run("mount -a", wait=1)

# END mount_nfs_vols()
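
The compress(*zip(*mount_paths)) idiom above is worth unpacking: zip(*pairs) turns a sequence of (value, flag) pairs back into one tuple of values and one tuple of flags, which then become the two arguments of compress. A minimal sketch of the same pattern, with hypothetical paths and flags:

import itertools as it

pairs = (
    ("/home", True),        # hypothetical (path, is_external) pairs
    ("/apps", False),
    ("/etc/munge", True),
)
# zip(*pairs) -> ("/home", "/apps", "/etc/munge") and (True, False, True)
for path in it.compress(*zip(*pairs)):
    print(path)  # prints /home and /etc/munge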


Example #2
Source File: __init__.py    From picklable-itertools with MIT License
def test_compress():
    yield verify_same, compress, itertools.compress, None, [1, 2, 3], [1, 2, 3]
    yield verify_same, compress, itertools.compress, None, [1, 2, 3], [1, 0, 0]
    yield verify_same, compress, itertools.compress, None, [1, 2, 3], [1, 0]
    yield verify_same, compress, itertools.compress, None, [1, 2], [1, 0, 1]
    yield verify_same, compress, itertools.compress, None, [1, 2], [0, 0]
    yield verify_same, compress, itertools.compress, None, [1, 2], [0]
    yield verify_same, compress, itertools.compress, None, [1, 2], [0, 0, 0]
    yield (verify_pickle, compress, itertools.compress, 3, 1, [1, 2, 3],
           [1, 2, 3])
    yield (verify_pickle, compress, itertools.compress, 3, 0, [1, 2, 3],
           [1, 2, 3])
    yield (verify_pickle, compress, itertools.compress, 1, 0, [1, 2, 3],
           [1, 0, 0])
    yield (verify_pickle, compress, itertools.compress, 1, 0, [1, 2, 3],
           [1, 0])
    yield (verify_pickle, compress, itertools.compress, 1, 0, [1, 2],
           [1, 0, 1]) 
Example #3
Source File: main.py    From satellite_tracker with GNU General Public License v3.0
def nearby_now(self) -> List[Tuple[str, Pos, float]]:
        now = datetime.utcnow()

        t1 = time()
        self.last_query_t = t1

        lons, lats, alts, errors = self.orbs.get_lonlatalt(now)
        t2 = time()
        rough_near = np.logical_and(np.abs(lats - self.loc.lat) < 3, np.abs(lons - self.loc.long) < 3)
        valid_satpos = list(
            zip(self.satnames[~errors][rough_near], lats[rough_near], lons[rough_near], alts[rough_near]))
        nearby = [(name, Pos(lat=lat, long=lon), alt) for name, lat, lon, alt in valid_satpos if
                  distance.distance(self.loc, (lat, lon)).km < 200]
        t3 = time()
        print("loc:{:.2f}s dist: {:.2f}s tot: {:.2f}s, sats: {:02d}".format(t2 - t1, t3 - t2, t3 - t1, len(nearby)))

        if not self.filtered_errors:
            print("filtering errors")
            self.satnames = self.satnames[~errors]
            self.tles = list(itertools.compress(self.tles, ~errors))
            self.create_orbitals()
            self.filtered_errors = True
        return nearby 
Example #4
Source File: nx_edge_augmentation.py    From ibeis with Apache License 2.0
def _unpack_available_edges(avail, weight=None, G=None):
    """Helper to separate avail into edges and corresponding weights"""
    if weight is None:
        weight = 'weight'
    if isinstance(avail, dict):
        avail_uv = list(avail.keys())
        avail_w = list(avail.values())
    else:
        def _try_getitem(d):
            try:
                return d[weight]
            except TypeError:
                return d
        avail_uv = [tup[0:2] for tup in avail]
        avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1])
                   for tup in avail]

    if G is not None:
        # Edges already in the graph are filtered
        # flags = [(G.has_node(u) and G.has_node(v) and not G.has_edge(u, v))
        #          for u, v in avail_uv]
        flags = [not G.has_edge(u, v) for u, v in avail_uv]
        avail_uv = list(it.compress(avail_uv, flags))
        avail_w = list(it.compress(avail_w, flags))
    return avail_uv, avail_w 
Example #5
Source File: display.py    From cliff with Apache License 2.0
def _generate_columns_and_selector(self, parsed_args, column_names):
        """Generate included columns and selector according to parsed args.

        :param parsed_args: argparse.Namespace instance with argument values
        :param column_names: sequence of strings containing names
                             of output columns
        """
        if not parsed_args.columns:
            columns_to_include = column_names
            selector = None
        else:
            columns_to_include = [c for c in column_names
                                  if c in parsed_args.columns]
            if not columns_to_include:
                raise ValueError('No recognized column names in %s. '
                                 'Recognized columns are %s.' %
                                 (str(parsed_args.columns), str(column_names)))

            # Set up argument to compress()
            selector = [(c in columns_to_include)
                        for c in column_names]
        return columns_to_include, selector 
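
The selector built here is a list of booleans aligned with column_names; presumably cliff later feeds it to compress() to trim each data row to the selected columns. A sketch of that consumption step, with made-up column and row values:

from itertools import compress

column_names = ('id', 'name', 'status')
selector = [True, False, True]        # keep 'id' and 'status'
row = ('42', 'example', 'ACTIVE')

print(list(compress(row, selector)))  # ['42', 'ACTIVE']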
Example #6
Source File: apk_analyzer.py    From apk_api_key_extractor with Apache License 2.0
def analyze_strings(mystrings):
    """
    A list of mystrings gets classified; only the strings predicted to be API keys are returned

    :param mystrings: a list of mystrings to be analyzed
    :return: a list of valid api keys
    :rtype: list
    """
    # for performance it's better to create a new list instead of removing elements from list
    smali_strings_filtered = []
    strings_features = []
    for string in mystrings:
        features = string_classifier.calculate_all_features(string.value)
        if features:
            features_list = list(features)
            smali_strings_filtered.append(string)
            strings_features.append(features_list)
    if len(strings_features) > 0:
        prediction = classifier.predict(np.array(strings_features))
        # prediction acts as a bitmask over the filtered strings
        api_keys_strings = list(itertools.compress(smali_strings_filtered, prediction))
        return api_keys_strings
    return [] 
Example #7
Source File: PortalFiles.py    From single_cell_portal with BSD 3-Clause "New" or "Revised" License
def subset_cells(self, keep_cells):
        """
        Write a file reduced to just the given cells
        """

        keep_cells = set(keep_cells)
        subset_file_name = self.tag_file_name(c_SUBSET_POSTFIX)
        if subset_file_name is None:
            return None

        with self.get_write_handle(subset_file_name) as file_writer:
            csv_writer = csv.writer(file_writer, delimiter=self.delimiter)
            check_handle = self.csv_handle
            keep_cells.add(c_EXPRESSION_00_ELEMENT)
            header = next(check_handle)
            row_1 = next(check_handle)
            if (len(header) == (len(row_1) - 1)) and (c_EXPRESSION_00_ELEMENT not in header):
                header = [c_EXPRESSION_00_ELEMENT] + header
            header_index = [cell in keep_cells for cell in header]
            # Need to add the header rows
            csv_writer.writerow(list(itertools.compress(header, header_index)))
            csv_writer.writerow(list(itertools.compress(row_1, header_index)))
            for file_line in check_handle:
                csv_writer.writerow(list(itertools.compress(file_line, header_index)))
        return subset_file_name
Example #8
Source File: util_list.py    From ubelt with Apache License 2.0
def compress(items, flags):
    """
    Selects from ``items`` where the corresponding value in ``flags`` is True.
    This is similar to :func:`numpy.compress`.

    This is actually a simple alias for :func:`itertools.compress`.

    Args:
        items (Iterable[Any]): a sequence to select items from

        flags (Iterable[bool]): corresponding sequence of bools

    Returns:
        Iterable[Any]: a subset of masked items

    Example:
        >>> import ubelt as ub
        >>> items = [1, 2, 3, 4, 5]
        >>> flags = [False, True, True, False, True]
        >>> list(ub.compress(items, flags))
        [2, 3, 5]
    """
    return it.compress(items, flags) 
Example #9
Source File: tabletools.py    From extratools with MIT License
def trim(table: Table, blank=None) -> Table:
    def isempty(v):
        return v is None or str(v).strip(blank) == ""

    table = iter2seq(table)

    nonemptyflags = [
        any(not isempty(v) for v in col)
        for col in transpose(table)
    ]

    for row in table:
        if all(isempty(v) for v in row):
            continue

        yield list(itertools.compress(row, nonemptyflags)) 
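
Assuming Table is an iterable of rows, iter2seq materializes it, and transpose is essentially zip(*rows), trim drops all-blank rows and all-blank columns in one pass. A self-contained approximation:

import itertools

def trim_sketch(rows):
    def isempty(v):
        return v is None or str(v).strip() == ""
    rows = list(rows)
    # keep a column only if at least one of its values is non-empty
    nonemptyflags = [any(not isempty(v) for v in col) for col in zip(*rows)]
    for row in rows:
        if all(isempty(v) for v in row):
            continue  # skip all-blank rows
        yield list(itertools.compress(row, nonemptyflags))

table = [["a", "", "b"],
         ["",  "", ""],    # all-blank row: dropped
         ["c", "", "d"]]   # middle column is all-blank: dropped
print(list(trim_sketch(table)))  # [['a', 'b'], ['c', 'd']]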
Example #10
Source File: test_documentation.py    From keras-contrib with MIT License
def assert_args_presence(args, doc, member, name):
    args_not_in_doc = [arg not in doc for arg in args]
    if any(args_not_in_doc):
        raise ValueError(
            "{} {} arguments are not present in documentation ".format(name, list(
                compress(args, args_not_in_doc))), member.__module__)
    words = doc.replace('*', '').split()
    # Check arguments styling
    styles = [arg + ":" not in words for arg in args]
    if any(styles):
        raise ValueError(
            "{} {} are not style properly 'argument': documentation".format(
                name,
                list(compress(args, styles))),
            member.__module__)

    # Check arguments order
    indexes = [words.index(arg + ":") for arg in args]
    if indexes != sorted(indexes):
        raise ValueError(
            "{} arguments order is different from the documentation".format(name),
            member.__module__) 
Example #11
Source File: loaddataSubClass.py    From drmad with MIT License
def select_subclassdata(X, y, totalClassNum, SubClassNum, subClassIndexList, normalize=True):

    X = np.array(list(itertools.compress(X, [c in subClassIndexList for c in y])))
    y = np.array(list(itertools.compress(y, [c in subClassIndexList for c in y])))

    d = {}
    for i in range(SubClassNum):
        d.update({subClassIndexList[i]: (totalClassNum + i)})

    d1 = {}
    for i in range(SubClassNum):
        d1.update({(totalClassNum + i): i})

    for k, v in d.items():
        np.place(y, y == k, v)
    for k, v in d1.items():
        np.place(y, y == k, v)
    return X, y 
Example #12
Source File: statements_to_HdlStatementBlocks.py    From hwt with MIT License
def cut_off_drivers_of(dstSignal, statements):
    """
    Cut off drivers from statements
    """
    separated = []
    stm_filter = []
    for stm in statements:
        stm._clean_signal_meta()
        d = stm._cut_off_drivers_of(dstSignal)
        if d is not None:
            separated.append(d)

        f = d is not stm
        stm_filter.append(f)

    return list(compress(statements, stm_filter)), separated 
Example #13
Source File: drop_words.py    From soft_patterns with MIT License
def main(args):
    argc = len(args)

    ratio = 0.05

    if argc < 3:
        print("Usage:", args[0], "<if> <of> <ratio={}>".format(ratio))
        return -1
    elif argc > 3:
        ratio = float(args[3])

    with open(args[1], encoding='utf-8') as ifh, open(args[2], 'w', encoding='utf-8') as ofh:
        for l in ifh:
            words = l.rstrip().split()
            indices = np.random.random_sample((len(words),)) > ratio
            selected_words = list(compress(words, indices))
            ofh.write(" ".join(selected_words) + "\n")

    return 0 
Example #14
Source File: process.py    From Lifting-from-the-Deep-release with GNU General Public License v3.0
def import_json(path='json/MPI_annotations.json', order='json/MPI_order.npy'):
    """Get the json file containing the dataset.
    We want the data to be shuffled, however the training has to be repeatable.
    This means that once shuffled, the order has to be maintained."""
    with open(path) as data_file:
        data_this = json.load(data_file)
        data_this = np.array(data_this['root'])
    num_samples = len(data_this)

    if os.path.exists(order):
        idx = np.load(order)
    else:
        idx = np.random.permutation(num_samples).tolist()
        np.save(order, idx)

    is_not_validation = [not data_this[i]['isValidation']
                         for i in range(num_samples)]
    keep_data_idx = list(compress(idx, is_not_validation))

    data = data_this[keep_data_idx]
    return data, len(keep_data_idx) 
Example #15
Source File: utilities.py    From minian with GNU General Public License v3.0
def handle_crash(varr, vpath, ssname, vlist, varr_list, frame_dict):
    # boolean masks aligned with varr_list, marking 'seg1' / 'seg2' videos
    seg1_flags = [bool(re.search('seg1', v)) for v in vlist]
    seg2_flags = [bool(re.search('seg2', v)) for v in vlist]
    if any(seg1_flags) and any(seg2_flags):
        tframe = frame_dict[ssname]
        varr1 = darr.concatenate(
            list(compress(varr_list, seg1_flags)),
            axis=0)
        varr2 = darr.concatenate(
            list(compress(varr_list, seg2_flags)),
            axis=0)
        fm1, fm2 = varr1.shape[0], varr2.shape[0]
        fm_crds = varr.coords['frame']
        fm_crds1 = fm_crds.sel(frame=slice(None, fm1 - 1)).values
        fm_crds2 = fm_crds.sel(frame=slice(fm1, None)).values
        fm_crds2 = fm_crds2 + (tframe - fm_crds2.max())
        fm_crds_new = np.concatenate([fm_crds1, fm_crds2], axis=0)
        return varr.assign_coords(frame=fm_crds_new)
    else:
        return varr 
Example #16
Source File: citation_utilities.py    From allofplos with MIT License
def micc_dictionary(paper):
    '''
    Analogous to citation_number_dictionary, but for MICCs rather than the number of citations.
    Co-citations are when two citations are included in the same end note (e.g., '[3-5]')
    :return: dict of counts for co-citation occurrences
    '''
    all_groups = [group_cleaner(g) for g in citation_grouper(paper)]
    references = paper.find_all("ref")
    max_ref_num = len(references)
    results = {}
    for i in range(1, max_ref_num + 1):
        counts = [g.count(i) for g in all_groups]
        cite_groups = compress(all_groups, counts)
        cocite_counts = [len(g) - 1 for g in cite_groups]
        if len(cocite_counts) == 0:
            cocite_counts = [-1]
        results[i] = median(cocite_counts)

    return results 
Example #17
Source File: prune.py    From GraphRole with MIT License
def _group_features(self, features: DataFrameLike) -> Iterator[Set[str]]:
        """
        Group features according to connected components of feature graph induced
        by pairwise distances below distance threshold
        :param features: DataFrame of features
        """
        # apply binning to features
        # note that some (non-pruned) features will be rebinned each time when this class is
        # used for pruning multiple generations of features, but this slight inefficiency removes
        # maintaining binned features in the state of the feature extraction class and is thus an
        # intentional tradeoff
        binned_features = features.apply(vertical_log_binning)
        # get condensed vector of pairwise distances measuring
        # max_i |u[i] - v[i]| for features u, v
        dists = pdist(binned_features.T, metric='chebychev')
        # construct feature graph by connecting features within
        # dist_thresh of each other and return connected components
        nodes = binned_features.columns
        all_edges = it.combinations(nodes, 2)
        edges = it.compress(all_edges, dists <= self._feature_group_thresh)
        feature_graph = AdjacencyDictGraph(edges)
        groups = feature_graph.get_connected_components()
        return groups 
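
This pairing works because pdist returns a condensed distance vector whose entries follow exactly the same (i, j) ordering as itertools.combinations over the points, so compress can line the two iterables up directly. A small check of that correspondence:

import itertools as it
import numpy as np
from scipy.spatial.distance import pdist

points = np.array([[0.0], [1.0], [5.0]])
dists = pdist(points, metric='chebyshev')  # condensed order: d(0,1), d(0,2), d(1,2)
pairs = it.combinations(range(len(points)), 2)

# keep only the pairs less than 2 apart
print(list(it.compress(pairs, dists < 2)))  # [(0, 1)]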
Example #18
Source File: edge_augmentation.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def _unpack_available_edges(avail, weight=None, G=None):
    """Helper to separate avail into edges and corresponding weights"""
    if weight is None:
        weight = 'weight'
    if isinstance(avail, dict):
        avail_uv = list(avail.keys())
        avail_w = list(avail.values())
    else:
        def _try_getitem(d):
            try:
                return d[weight]
            except TypeError:
                return d
        avail_uv = [tup[0:2] for tup in avail]
        avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1])
                   for tup in avail]

    if G is not None:
        # Edges already in the graph are filtered
        flags = [not G.has_edge(u, v) for u, v in avail_uv]
        avail_uv = list(it.compress(avail_uv, flags))
        avail_w = list(it.compress(avail_w, flags))
    return avail_uv, avail_w 
Example #19
Source File: setup.py    From slurm-gcp with Apache License 2.0
def setup_nfs_exports():

    export_paths = (
        (HOME_DIR, not EXTERNAL_MOUNT_HOME),
        (APPS_DIR, not EXTERNAL_MOUNT_APPS),
        (MUNGE_DIR, not EXTERNAL_MOUNT_MUNGE),
        (SEC_DISK_DIR, cfg.controller_secondary_disk),
    )

    # export path if corresponding selector boolean is True
    for path in it.compress(*zip(*export_paths)):
        util.run(rf"sed -i '\#{path}#d' /etc/exports")
        with open('/etc/exports', 'a') as f:
            f.write(f"\n{path}  *(rw,no_subtree_check,no_root_squash)")

    util.run("exportfs -a")
# END setup_nfs_exports() 
Example #20
Source File: SeriouslyCommands.py    From Seriously with MIT License
def filter_true_fn(srs):
    a,b = srs.pop(), srs.pop()
    if isinstance(a, SeriousFunction):
        res = []
        for x in b:
            s2 = srs.make_new(x)
            aout = a(s2)
            if aout and aout[0]:
                res.append(x)
        srs.push(res)
    else:
        srs.push(itertools.compress(b,a)) 
Example #21
Source File: memorize.py    From pylivetrader with Apache License 2.0
def alive(self):
        return all(item() is not None
                   for item in compress(self._items, self._selectors)) 
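
Here a stored tuple of weakrefs is paired with a parallel tuple of selector flags, so only the selected weakrefs are dereferenced and checked. A sketch of the same idea with plain weakref objects (the Payload class is made up for illustration):

import weakref
from itertools import compress

class Payload:
    pass

a, b = Payload(), Payload()
items = (weakref.ref(a), weakref.ref(b))
selectors = (True, False)  # only the first item participates in the check

# alive iff every *selected* weakref still points at a live object
alive = all(item() is not None for item in compress(items, selectors))
print(alive)  # True, regardless of whether b has been collected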
Example #22
Source File: market_calendar.py    From pandas_market_calendars with MIT License
def _overwrite_special_dates(midnight_utcs,
                             opens_or_closes,
                             special_opens_or_closes):
    """
    Overwrite dates in open_or_closes with corresponding dates in
    special_opens_or_closes, using midnight_utcs for alignment.
    """
    # Short circuit when nothing to apply.
    if not len(special_opens_or_closes):
        return

    len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
    if len_m != len_oc:
        raise ValueError(
            "Found misaligned dates while building calendar.\n"
            "Expected midnight_utcs to be the same length as open_or_closes,\n"
            "but len(midnight_utcs)=%d, len(open_or_closes)=%d" % (len_m, len_oc)
        )

    # Find the array indices corresponding to each special date.
    indexer = midnight_utcs.get_indexer(special_opens_or_closes.normalize())

    # -1 indicates that no corresponding entry was found.  If any -1s are
    # present, then we have special dates that don't correspond to any
    # trading day. Filter these out.
    good_indexes = [i != -1 for i in indexer]
    indexer = list(compress(indexer, good_indexes))

    # NOTE: This is a slightly dirty hack.  We're in-place overwriting the
    # internal data of an Index, which is conceptually immutable.  Since we're
    # maintaining sorting, this should be ok, but this is a good place to
    # sanity check if things start going haywire with calendar computations.
    opens_or_closes.values[indexer] = special_opens_or_closes.values[good_indexes] 
Example #23
Source File: manipulation.py    From multi-agent-emergence-environments with MIT License
def reset(self):
        obs = self.env.reset()
        sim = self.unwrapped.sim

        if self.obj_in_game_metadata_keys is not None:
            self.actual_body_slice = np.concatenate([self.metadata[k] for k in self.obj_in_game_metadata_keys])
        else:
            self.actual_body_slice = np.ones((len(self.body_names))).astype(bool)
        actual_body_names = list(compress(self.body_names, self.actual_body_slice))
        self.n_obj = len(actual_body_names)

        # Cache body ids
        self.obj_body_idxs = np.array([sim.model.body_name2id(body_name) for body_name in actual_body_names])
        self.agent_body_idxs = np.array([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])

        # Cache geom ids
        self.obj_geom_ids = np.array([sim.model.geom_name2id(body_name) for body_name in actual_body_names])
        self.agent_geom_ids = np.array([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])

        # Cache constraint ids
        self.agent_eq_ids = np.array(
            [i for i, obj1 in enumerate(sim.model.eq_obj1id)
             if sim.model.body_names[obj1] == f"agent{i}:particle"])
        assert len(self.agent_eq_ids) == self.n_agents

        # turn off equality constraints
        sim.model.eq_active[self.agent_eq_ids] = 0
        self.obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
        self.last_obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)

        return self.observation(obs) 
Example #24
Source File: utils.py    From dask-ml with BSD 3-Clause "New" or "Revised" License
def _list_indexing(X, key, key_dtype):
    """Index a Python list."""
    if np.isscalar(key) or isinstance(key, slice):
        # key is a slice or a scalar
        return X[key]
    if key_dtype == "bool":
        # key is a boolean array-like
        return list(compress(X, key))
    # key is an integer array-like of keys
    return [X[idx] for idx in key] 
Example #25
Source File: test_iter_jy.py    From CTFCrackTools-V2 with GNU General Public License v3.0
def test_compress(self):
        
        class TestCount(object):
            def __init__(self, data, selectors):
                self.data = data
                self.selectors = selectors
            def __iter__(self):
                return itertools.compress(self.data, self.selectors)
        
        obj = TestCount((1, 2, 3, 4, 5), (1, 0, 1, 0, 1))
        self.assertEqual(list(obj), [1, 3, 5])
        self.assertEqual(list(obj), list(obj.__iter__())) 
Example #26
Source File: test_NN.py    From deep-transfer-learning-crop-prediction with MIT License
def initialize_uninitialized_vars(sess):
    global_vars = tf.global_variables()
    is_not_initialized = sess.run([~(tf.is_variable_initialized(var)) \
                                   for var in global_vars])
    not_initialized_vars = list(compress(global_vars, is_not_initialized))

    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars)) 
Example #27
Source File: status_handling.py    From parsl with Apache License 2.0
def _filter_scale_in_ids(self, to_kill, killed):
        """ Filter out job id's that were not killed
        """
        assert len(to_kill) == len(killed)
        # Filters first iterable by bool values in second
        return list(compress(to_kill, killed)) 
Example #28
Source File: tools.py    From asynq with Apache License 2.0
def afilter(function, sequence):
    """Equivalent of filter() that takes an async filter function.

    Returns a list.

    """
    if function is None:
        result(list(filter(None, sequence)))
        return
    should_include = yield [function.asynq(elt) for elt in sequence]
    result(list(itertools.compress(sequence, should_include)))
    return 
Example #29
Source File: tools.py    From asynq with Apache License 2.0
def afilterfalse(function, sequence):
    """Equivalent of itertools.ifilterfalse() that takes an async filter function.

    Returns a list.

    """
    should_exclude = yield [function.asynq(elt) for elt in sequence]
    should_include = [not res for res in should_exclude]
    result(list(itertools.compress(sequence, should_include)))
    return
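
Both helpers follow the same two-phase pattern: evaluate the (async) predicate over every element up front, then keep or invert the resulting mask and select with compress. A synchronous analogue of afilterfalse:

from itertools import compress

def filterfalse_list(predicate, sequence):
    # phase 1: evaluate the predicate over every element
    should_exclude = [predicate(elt) for elt in sequence]
    # phase 2: invert the mask and select with compress
    should_include = [not res for res in should_exclude]
    return list(compress(sequence, should_include))

print(filterfalse_list(lambda x: x % 2 == 0, [1, 2, 3, 4]))  # [1, 3]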