Python six.moves.zip_longest() Examples

The following are 29 code examples of six.moves.zip_longest(), collected from open-source projects. The project, source file, and license for each example are listed above it. You may also want to check out the other available functions and classes of the six.moves module.
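six.moves.zip_longest resolves to itertools.izip_longest on Python 2 and to itertools.zip_longest on Python 3. Unlike the builtin zip, it keeps yielding tuples until the longest input is exhausted, padding the shorter inputs with fillvalue (None by default). A minimal sketch with throwaway data, not taken from any of the projects below:

from six.moves import zip_longest

names = ['a', 'b', 'c']   # made-up sample inputs
values = [1, 2]

print(list(zip(names, values)))                       # [('a', 1), ('b', 2)]
print(list(zip_longest(names, values)))               # [('a', 1), ('b', 2), ('c', None)]
print(list(zip_longest(names, values, fillvalue=0)))  # [('a', 1), ('b', 2), ('c', 0)]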
Example #1
Source File: test_adjusted_array.py    From catalyst with Apache License 2.0
def test_overwrite_adjustment_cases(self,
                                    name,
                                    baseline,
                                    lookback,
                                    adjustments,
                                    missing_value,
                                    perspective_offset,
                                    expected):
    array = AdjustedArray(baseline, NOMASK, adjustments, missing_value)

    for _ in range(2):  # Iterate 2x to ensure adjusted_arrays are re-usable.
        window_iter = array.traverse(
            lookback,
            perspective_offset=perspective_offset,
        )
        for yielded, expected_yield in zip_longest(window_iter, expected):
            check_arrays(yielded, expected_yield)
Example #2
Source File: utils.py    From pykube with Apache License 2.0
def obj_check(a, b):
    c = None
    if not isinstance(a, type(b)):
        c = a
    else:
        if isinstance(a, dict):
            c = obj_merge(a, b)
        elif isinstance(a, list):
            z = []
            for x, y in zip_longest(a, b, fillvalue=empty):
                if x is empty:
                    z.append(y)
                elif y is empty:
                    z.append(x)
                else:
                    z.append(obj_check(x, y))
            c = z
        else:
            c = a
    return c 
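The zip_longest call with a sentinel fillvalue is what lets obj_check merge two lists of unequal length without dropping the tail of the longer one; a plain zip would silently truncate it. A simplified, self-contained sketch of that list-merging idea (the sentinel _EMPTY and the function merge_lists are my own names, not pykube's):

from six.moves import zip_longest

_EMPTY = object()  # sentinel: tells a missing slot apart from a legitimate None value

def merge_lists(a, b):
    """Pairwise-merge two lists, keeping leftovers from whichever list is longer."""
    merged = []
    for x, y in zip_longest(a, b, fillvalue=_EMPTY):
        if x is _EMPTY:
            merged.append(y)
        elif y is _EMPTY:
            merged.append(x)
        else:
            merged.append(x)  # both present: prefer the first list's element
    return merged

print(merge_lists([1, 2], [10, 20, 30]))  # [1, 2, 30]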
Example #3
Source File: test_parse.py    From eliot with Apache License 2.0
def test_parse_into_tasks(
    self, structure_and_messages1, structure_and_messages2, structure_and_messages3
):
    """
    Adding messages to a L{Parser} parses them into L{Task} instances.
    """
    _, messages1 = structure_and_messages1
    _, messages2 = structure_and_messages2
    _, messages3 = structure_and_messages3
    all_messages = (messages1, messages2, messages3)
    # Need unique UUIDs per task:
    assume(len(set(m[0][TASK_UUID_FIELD] for m in all_messages)) == 3)

    parser = Parser()
    all_tasks = []
    for message in chain(*zip_longest(*all_messages)):
        if message is not None:
            completed_tasks, parser = parser.add(message)
            all_tasks.extend(completed_tasks)

    assertCountEqual(
        self, all_tasks, [parse_to_task(msgs) for msgs in all_messages]
    )
Example #4
Source File: test_six.py    From data with GNU General Public License v3.0
def test_move_items(item_name):
    """Ensure that everything loads correctly."""
    try:
        item = getattr(six.moves, item_name)
        if isinstance(item, types.ModuleType):
            __import__("six.moves." + item_name)
    except AttributeError:
        if item_name == "zip_longest" and sys.version_info < (2, 6):
            py.test.skip("zip_longest only available on 2.6+")
    except ImportError:
        if item_name == "winreg" and not sys.platform.startswith("win"):
            py.test.skip("Windows only module")
        if item_name.startswith("tkinter"):
            if not have_tkinter:
                py.test.skip("requires tkinter")
            if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
                py.test.skip("ttk only available on 2.7+")
        if item_name.startswith("dbm_gnu") and not have_gdbm:
            py.test.skip("requires gdbm")
        raise
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves) 
Example #5
Source File: compare.py    From wub with Mozilla Public License 2.0
def calc_consistency_score(segment_one, segment_two, offset_one, offset_two):
    """Calculate the number of bases aligned to the same reference bases in two
    alignments.
    :param segment_one: Pysam aligned segment.
    :param segment_two: Pysam aligned segment.
    :param offset_one: Hard clipping offset for the first alignment.
    :param offset_two: Hard clipping offset for the second alignment.
    :returns: Number of matching base alignments.
    :rtype: int
    """
    matches_one = aligned_pairs_to_matches(
        segment_one.get_aligned_pairs(), offset_one)
    matches_two = aligned_pairs_to_matches(
        segment_two.get_aligned_pairs(), offset_two)

    score = 0
    for matches in zip_longest(matches_one, matches_two, fillvalue=False):
        if matches[0] == matches[1]:
            score += 1

    return score 
Example #6
Source File: posts.py    From dcard-spider with MIT License
def get(self, content=True, links=True, comments=True):

    _content = self.gen_content_reqs(self.ids) if content else []
    _links = self.gen_links_reqs(self.ids) if links else []
    _comments = self.get_comments(self.ids, self.metas) if comments else ()

    def gen_posts():
        for content, links, comments in zip_longest(
            api.imap(_content), api.imap(_links), _comments
        ):
            post = {}
            post.update(content.json()) if content else None
            post.update({
                'links': links.json() if links else None,
                'comments': self.extract_comments(comments)
            })
            if post:
                yield post

        logger.info('[Posts.gen_posts <gen>] Processed.')

    return PostsResult(gen_posts)
Example #7
Source File: test_six.py    From c4ddev with MIT License
def test_move_items(item_name):
    """Ensure that everything loads correctly."""
    try:
        item = getattr(six.moves, item_name)
        if isinstance(item, types.ModuleType):
            __import__("six.moves." + item_name)
    except AttributeError:
        if item_name == "zip_longest" and sys.version_info < (2, 6):
            py.test.skip("zip_longest only available on 2.6+")
    except ImportError:
        if item_name == "winreg" and not sys.platform.startswith("win"):
            py.test.skip("Windows only module")
        if item_name.startswith("tkinter"):
            if not have_tkinter:
                py.test.skip("requires tkinter")
            if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
                py.test.skip("ttk only available on 2.7+")
        if item_name.startswith("dbm_gnu") and not have_gdbm:
            py.test.skip("requires gdbm")
        raise
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves) 
Example #8
Source File: test_adjusted_array.py    From catalyst with Apache License 2.0
def test_multiplicative_adjustments(self,
                                    name,
                                    data,
                                    lookback,
                                    adjustments,
                                    missing_value,
                                    perspective_offset,
                                    expected):

    array = AdjustedArray(data, NOMASK, adjustments, missing_value)
    for _ in range(2):  # Iterate 2x to ensure adjusted_arrays are re-usable.
        window_iter = array.traverse(
            lookback,
            perspective_offset=perspective_offset,
        )
        for yielded, expected_yield in zip_longest(window_iter, expected):
            check_arrays(yielded, expected_yield)
Example #9
Source File: background_network_workload_test.py    From PerfKitBenchmarker with Apache License 2.0
def _CheckVmCallCounts(self, spec, working_groups, working_expected_counts,
                         non_working_groups, non_working_expected_counts):
    # TODO(skschneider): This is also used in TestBackgroundNetworkWorkload.
    # Consider moving to a shared function or base class.
    expected_call_counts = {group: working_expected_counts
                            for group in working_groups}
    expected_call_counts.update({group: non_working_expected_counts
                                 for group in non_working_groups})
    for group_name, vm_expected_call_counts in six.iteritems(
        expected_call_counts):
      group_vms = spec.vm_groups[group_name]
      self.assertEqual(len(group_vms), 1,
                       msg='VM group "{0}" had {1} VMs'.format(group_name,
                                                               len(group_vms)))
      vm = group_vms[0]
      iter_mocked_functions = zip_longest(_MOCKED_VM_FUNCTIONS,
                                          vm_expected_call_counts)
      for function_name, expected_call_count in iter_mocked_functions:
        call_count = getattr(vm, function_name).call_count
        self.assertEqual(call_count, expected_call_count, msg=(
            'Expected {0} from VM group "{1}" to be called {2} times, but it '
            'was called {3} times.'.format(function_name, group_name,
                                           expected_call_count, call_count))) 
Example #10
Source File: orderedmultidict.py    From pipenv with MIT License
def __eq__(self, other):
    if callable_attr(other, 'iterallitems'):
        myiter, otheriter = self.iterallitems(), other.iterallitems()
        for i1, i2 in zip_longest(myiter, otheriter, fillvalue=_absent):
            if i1 != i2 or i1 is _absent or i2 is _absent:
                return False
    elif not hasattr(other, '__len__') or not hasattr(other, _items_attr):
        return False
    # Ignore order so we can compare ordered omdicts with unordered dicts.
    else:
        if len(self) != len(other):
            return False
        for key, value in six.iteritems(other):
            if self.get(key, _absent) != value:
                return False
    return True
Example #11
Source File: background_cpu_test.py    From PerfKitBenchmarker with Apache License 2.0
def _CheckVmCallCounts(self, spec, working_groups, working_expected_counts,
                         non_working_groups, non_working_expected_counts):
    # TODO(skschneider): This is also used in TestBackgroundNetworkWorkload.
    # Consider moving to a shared function or base class.
    expected_call_counts = {group: working_expected_counts
                            for group in working_groups}
    expected_call_counts.update({group: non_working_expected_counts
                                 for group in non_working_groups})
    for group_name, vm_expected_call_counts in six.iteritems(
        expected_call_counts):
      group_vms = spec.vm_groups[group_name]
      self.assertEqual(len(group_vms), 1,
                       msg='VM group "{0}" had {1} VMs'.format(group_name,
                                                               len(group_vms)))
      vm = group_vms[0]
      iter_mocked_functions = zip_longest(_MOCKED_VM_FUNCTIONS,
                                          vm_expected_call_counts)
      for function_name, expected_call_count in iter_mocked_functions:
        call_count = getattr(vm, function_name).call_count
        self.assertEqual(call_count, expected_call_count, msg=(
            'Expected {0} from VM group "{1}" to be called {2} times, but it '
            'was called {3} times.'.format(function_name, group_name,
                                           expected_call_count, call_count))) 
Example #12
Source File: functions.py    From graphite-api with Apache License 2.0
def averageSeries(requestContext, *seriesLists):
    """
    Short Alias: avg()

    Takes one metric or a wildcard seriesList.
    Draws the average value of all metrics passed at each time.

    Example::

        &target=averageSeries(company.server.*.threads.busy)

    """
    if not seriesLists or not any(seriesLists):
        return []
    seriesList, start, end, step = normalize(seriesLists)
    name = "averageSeries(%s)" % formatPathExpressions(seriesList)
    values = (safeDiv(safeSum(row), safeLen(row))
              for row in zip_longest(*seriesList))
    series = TimeSeries(name, start, end, step, values)
    series.pathExpression = name
    return [series] 
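The zip_longest(*seriesList) call is a transpose idiom: unpacking the list of per-series value lists gives zip_longest one positional argument per series, so each yielded row holds the values of every series at one timestamp, padded with None where a shorter series has no data. A standalone sketch with made-up numbers:

from six.moves import zip_longest

series_list = [[1, 2, 3], [4, 5]]       # two series of unequal length
rows = list(zip_longest(*series_list))  # one tuple per timestamp
print(rows)                             # [(1, 4), (2, 5), (3, None)]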
Example #13
Source File: layout.py    From pycbc with GNU General Public License v3.0
def grouper(iterable, n, fillvalue=None):
    """ Group items into chunks of n length
    """
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue) 
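This is the classic itertools grouper recipe: putting the same iterator into the argument list n times makes zip_longest pull n consecutive items into each output tuple. A usage sketch (the definition is repeated so the snippet runs on its own; the sample string is mine):

from six.moves import zip_longest

def grouper(iterable, n, fillvalue=None):
    """Group items into chunks of n length."""
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

print(list(grouper('ABCDEFG', 3, fillvalue='x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]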
Example #14
Source File: functions.py    From graphite-api with Apache License 2.0
def sumSeries(requestContext, *seriesLists):
    """
    Short form: sum()

    This will add metrics together and return the sum at each datapoint. (See
    integral for a sum over time)

    Example::

        &target=sum(company.server.application*.requestsHandled)

    This would show the sum of all requests handled per minute (provided
    requestsHandled are collected once a minute). If metrics with different
    retention rates are combined, the coarsest metric is graphed, and the sum
    of the other metrics is averaged for the metrics with finer retention
    rates.

    """
    if not seriesLists or not any(seriesLists):
        return []
    seriesList, start, end, step = normalize(seriesLists)
    name = "sumSeries(%s)" % formatPathExpressions(seriesList)
    values = (safeSum(row) for row in zip_longest(*seriesList))
    series = TimeSeries(name, start, end, step, values)
    series.pathExpression = name
    return [series] 
Example #15
Source File: recipes.py    From Tautulli with GNU General Public License v3.0
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

        >>> list(grouper(3, 'ABCDEFG', 'x'))
        [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]

    """
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args) 
Example #16
Source File: tcping.py    From tcping with MIT License
def cost(self, funcs, args):
    self.start()
    for func, arg in zip_longest(funcs, args):
        if arg:
            func(*arg)
        else:
            func()

    self.stop()
    return self._stop - self._start
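Here zip_longest allows funcs to be longer than args: the trailing functions are paired with None and therefore called without arguments. A minimal Python 3 sketch of that pairing, with the timer bookkeeping left out and made-up callables (call_all is a hypothetical name, not part of tcping):

from six.moves import zip_longest

def call_all(funcs, args):
    # Stand-in for the pairing loop in cost() above, minus start/stop timing.
    for func, arg in zip_longest(funcs, args):
        if arg:
            func(*arg)
        else:
            func()

call_all([print, print], [('first', 'call')])  # second print() runs with no arguments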
Example #17
Source File: more.py    From Tautulli with GNU General Public License v3.0
def interleave_longest(*iterables):
    """Return a new iterable yielding from each iterable in turn,
    skipping any that are exhausted.

        >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
        [1, 4, 6, 2, 5, 7, 3, 8]

    This function produces the same output as :func:`roundrobin`, but may
    perform better for some inputs (in particular when the number of iterables
    is large).

    """
    i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
    return (x for x in i if x is not _marker) 
Example #18
Source File: more.py    From Tautulli with GNU General Public License v3.0
def zip_offset(*iterables, **kwargs):
    """``zip`` the input *iterables* together, but offset the `i`-th iterable
    by the `i`-th item in *offsets*.

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]

    This can be used as a lightweight alternative to SciPy or pandas to analyze
    data sets in which some series have a lead or lag relationship.

    By default, the sequence will end when the shortest iterable is exhausted.
    To continue until the longest iterable is exhausted, set *longest* to
    ``True``.

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]

    By default, ``None`` will be used to replace offsets beyond the end of the
    sequence. Specify *fillvalue* to use some other value.

    """
    offsets = kwargs['offsets']
    longest = kwargs.get('longest', False)
    fillvalue = kwargs.get('fillvalue', None)

    if len(iterables) != len(offsets):
        raise ValueError("Number of iterables and offsets didn't match")

    staggered = []
    for it, n in zip(iterables, offsets):
        if n < 0:
            staggered.append(chain(repeat(fillvalue, -n), it))
        elif n > 0:
            staggered.append(islice(it, n, None))
        else:
            staggered.append(it)

    if longest:
        return zip_longest(*staggered, fillvalue=fillvalue)

    return zip(*staggered) 
Example #19
Source File: util_talbes.py    From deep-crf with MIT License
def _get_printable_row(self, row):
    maxes = self._calc_maxes()
    return '| ' + ' | '.join(
        [('{0: <%d}' % m).format(r)
         for r, m in zip_longest(row, maxes, fillvalue='')]) + ' |'
Example #20
Source File: tile_coding.py    From rl_trading with Apache License 2.0
def tileswrap(ihtORsize, numtilings, floats, wrapwidths, ints=[],
              readonly=False):
    '''
    returns num-tilings tile indices corresponding to the floats and ints,
    wrapping some floats

    :param ihtORsize: integer or IHT object. An index hash table or a positive
        integer specifying the upper range of returned indices
    :param numtilings: integer. the number of tilings desired. For best
        results, the second argument, numTilings, should be a power of two
        greater or equal to four times the number of floats
    :param memory-size: integer. the number of possible tile indices
    :param floats: list. a list of real values making up the input vector
    :param wrapwidths:
    :param ints*: list. optional list of integers to get different hashings
    :param readonly*: boolean.
    '''
    qfloats = [floor(f*numtilings) for f in floats]
    Tiles = []
    for tiling in range(numtilings):
        tilingX2 = tiling*2
        coords = [tiling]
        b = tiling
        for q, width in zip_longest(qfloats, wrapwidths):
            c = (q + b % numtilings) // numtilings
            coords.append(c % width if width else c)
            b += tilingX2
        coords.extend(ints)
        Tiles.append(hashcoords(coords, ihtORsize, readonly))
    return Tiles 
Example #21
Source File: iter_utils.py    From upvote with Apache License 2.0
def Grouper(iterable, chunk_size, fillvalue=None):
  """Chunks an iterable.

  Source: http://docs.python.org/library/itertools.html.

  Args:
    iterable: iterable, An iterable.
    chunk_size: int, Chunk size.
    fillvalue: object, Fill value.

  Returns:
    An iterable of chunks.
  """
  args = [iter(iterable)] * chunk_size
  return zip_longest(*args, fillvalue=fillvalue) 
Example #22
Source File: test_syntax.py    From allura with Apache License 2.0
def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args) 
Example #23
Source File: coqtail.py    From Coqtail with MIT License
def _find_diff(x, y, stop=None):
    # type: (Sequence[Any], Sequence[Any], Optional[int]) -> Optional[int]
    """Locate the first differing element in 'x' and 'y' up to 'stop'."""
    seq = enumerate(zip_longest(x, y))  # type: Iterator[Tuple[int, Any]]
    if stop is not None:
        seq = islice(seq, stop)
    return next((i for i, vs in seq if vs[0] != vs[1]), None) 
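Because zip_longest pads the shorter sequence with None, a pure length difference is also reported: the first surplus element of the longer sequence compares unequal to the padding. A self-contained paraphrase of the helper above (type comments dropped, sample strings mine):

from itertools import islice
from six.moves import zip_longest

def find_diff(x, y, stop=None):
    """Index of the first position where x and y differ, or None if they match."""
    pairs = enumerate(zip_longest(x, y))
    if stop is not None:
        pairs = islice(pairs, stop)
    return next((i for i, (a, b) in pairs if a != b), None)

print(find_diff('abcd', 'abzd'))  # 2    (first differing character)
print(find_diff('abc', 'abcd'))   # 3    ('d' vs the None padding)
print(find_diff('abc', 'abc'))    # None (no difference)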
Example #24
Source File: xlsx_model.py    From pptx-template with Apache License 2.0
def _build_tsv(rect_list, side_by_side=False, transpose=False, format_cell=False):
    """
    Build a single two-dimensional array from Excel named ranges (multiple cell ranges).
    rect_list:    list of the cell ranges themselves
    side_by_side: lay the cell ranges out side by side; if omitted, they are stacked vertically
    transpose:    transpose the result (rows and columns swapped, after the ranges are combined)
    """
    result = []
    for rect_index, rect in enumerate(rect_list):
        for row_index, row in enumerate(rect):
            line = []
            for cell in row:
                value = cell
                if not cell:
                    value = None
                elif hasattr(cell, 'value'):
                    value = _format_cell_value(cell) if format_cell else cell.value
                else:
                    raise ValueError("Unknown type %s for %s" % (type(cell), cell))
                line.append(value)
            if side_by_side and rect_index > 0:
                result[row_index].extend(line)
            else:
                result.append(line)

    if transpose:
        result = [list(row) for row in moves.zip_longest(*result, fillvalue=None)] # idiom for transpose

    return result 
Example #25
Source File: helpers.py    From pyescpos with Apache License 2.0
def chunks(iterable, size):
    def chunk_factory(iterable, size):
        args = [iter(iterable)] * size
        return zip_longest(*args, fillvalue=None)
    for chunk in chunk_factory(iterable, size):
        yield ''.join([e for e in chunk if e is not None]) 
Example #26
Source File: graph.py    From GraphEmbeddingRecommendationSystem with MIT License
def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue) 
Example #27
Source File: dictionary.py    From dateparser with BSD 3-Clause "New" or "Revised" License
def __init__(self, locale_info, settings=None):
    dictionary = {}
    self._settings = settings
    self.info = locale_info

    if 'skip' in locale_info:
        skip = map(methodcaller('lower'), locale_info['skip'])
        dictionary.update(zip_longest(skip, [], fillvalue=None))
    if 'pertain' in locale_info:
        pertain = map(methodcaller('lower'), locale_info['pertain'])
        dictionary.update(zip_longest(pertain, [], fillvalue=None))
    for word in KNOWN_WORD_TOKENS:
        if word in locale_info:
            translations = map(methodcaller('lower'), locale_info[word])
            dictionary.update(zip_longest(translations, [], fillvalue=word))
    dictionary.update(zip_longest(ALWAYS_KEEP_TOKENS, ALWAYS_KEEP_TOKENS))
    dictionary.update(zip_longest(map(methodcaller('lower'),
                                      PARSER_KNOWN_TOKENS),
                                  PARSER_KNOWN_TOKENS))

    relative_type = locale_info.get('relative-type', {})
    for key, value in relative_type.items():
        relative_translations = map(methodcaller('lower'), value)
        dictionary.update(zip_longest(relative_translations, [], fillvalue=key))

    self._dictionary = dictionary

    no_word_spacing = locale_info.get('no_word_spacing', 'False')
    self._no_word_spacing = bool(eval(no_word_spacing))

    relative_type_regex = locale_info.get("relative-type-regex", {})
    self._relative_strings = list(chain.from_iterable(relative_type_regex.values()))
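The repeated zip_longest(words, [], fillvalue=...) calls above are a compact idiom for mapping every word in a list to one constant value: zipping against an empty list pairs each word with the fillvalue. A standalone sketch of the idiom with made-up words:

from six.moves import zip_longest

words = ['hour', 'hours', 'hr']
print(dict(zip_longest(words, [], fillvalue='hour')))
# {'hour': 'hour', 'hours': 'hour', 'hr': 'hour'}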
Example #28
Source File: graph.py    From OpenHINE with MIT License
def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return zip_longest(*[iter(iterable)] * n, fillvalue=padvalue) 
Example #29
Source File: test_six.py    From six with MIT License
def test_zip_longest():
    from six.moves import zip_longest
    it = zip_longest(range(2), range(1))

    assert six.advance_iterator(it) == (0, 0)
    assert six.advance_iterator(it) == (1, None)