Python itertools.repeat() Examples
The following are 30
code examples of itertools.repeat().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
itertools, or try the search function.

Example #1
Source File: exceptions.py From python-netsurv with MIT License | 6 votes |
def _hash_comparison(self):
    """Return a human-readable comparison of actual vs. expected hashes.

    Builds one "Expected" line per allowed digest — the first prefixed
    with the hash name, the rest with "or" — followed by a "Got" line
    showing the digest actually computed.

    Example::

        Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
                     or 123451234512345123451234512345123451234512345
             Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
    """
    def hash_then_or(hash_name):
        # Yields the hash name once, then "or" forever, so only the
        # first expected value carries the algorithm label.
        # For now, all the decent hashes have 6-char names, so we can get
        # away with hard-coding space literals.
        return chain([hash_name], repeat(' or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend((' Expected %s %s' % (next(prefix), e))
                     for e in expecteds)
        lines.append(' Got %s\n' % self.gots[hash_name].hexdigest())
        # FIX: removed the original trailing ``prefix = ' or'`` — a dead
        # store: prefix is rebuilt at the top of every iteration and is
        # never read after the loop.
    return '\n'.join(lines)
Example #2
Source File: timeit.py From jawfish with MIT License | 6 votes |
def timeit(self, number=default_number):
    """Time 'number' executions of the main statement.

    Runs the setup statement once, then measures how long it takes to
    execute the main statement `number` times (default: one million),
    returning the elapsed time as a float in seconds.  The statements
    and the timer function were supplied to the constructor.
    """
    iterations = itertools.repeat(None, number)
    gc_was_enabled = gc.isenabled()
    # Disable the collector so GC pauses do not pollute the timing;
    # restore it afterwards no matter what happens inside inner().
    gc.disable()
    try:
        elapsed = self.inner(iterations, self.timer)
    finally:
        if gc_was_enabled:
            gc.enable()
    return elapsed
Example #3
Source File: pyparsing.py From jbox with MIT License | 6 votes |
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, it will *not* match the leading C{"1:1"} in
    C{"1:10"}; the expressions are evaluated first, and then compared,
    so C{"1"} is compared with C{"10"}.
    Do *not* use with packrat parsing enabled.
    """
    repeater = Forward()
    repeater <<= expr.copy()

    def recordMatchedTokens(s, l, t):
        # Capture what the first occurrence matched ...
        expected = _flatten(t.asList())

        def verifyRepeatMatches(s, l, t):
            # ... and fail the repeat unless it produced the same tokens.
            if _flatten(t.asList()) != expected:
                raise ParseException("", 0, "")
        repeater.setParseAction(verifyRepeatMatches, callDuringTry=True)

    expr.addParseAction(recordMatchedTokens, callDuringTry=True)
    return repeater
Example #4
Source File: timeit.py From jawfish with MIT License | 6 votes |
def repeat(self, repeat=default_repeat, number=default_number):
    """Call timeit() a few times.

    Convenience wrapper that invokes timeit() repeatedly and returns
    the list of results.  The first argument is how many times to call
    timeit() (default 3); the second is the 'number' argument for each
    call (default one million).

    Note: it's tempting to compute the mean and standard deviation of
    the result vector and report those, but it is not very useful.  In
    a typical case the lowest value gives a lower bound for how fast
    the machine can run the snippet; higher values are typically not
    caused by variability in Python's speed but by other processes
    interfering with timing accuracy.  So min() of the result is
    probably the only number of interest; after that, look at the whole
    vector and apply common sense rather than statistics.
    """
    return [self.timeit(number) for _ in range(repeat)]
Example #5
Source File: pyparsing.py From jbox with MIT License | 6 votes |
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, it will also match the leading C{"1:1"} in
    C{"1:10"}; if this is not desired, use C{matchPreviousExpr}.
    Do *not* use with packrat parsing enabled.
    """
    repeater = Forward()

    def recordMatchedTokens(s, l, t):
        # Rebind the repeater to literally match whatever was just seen.
        if not t:
            repeater << Empty()
        elif len(t) == 1:
            repeater << t[0]
        else:
            # flatten nested token lists before rebuilding the literals
            flattened = _flatten(t.asList())
            repeater << And([Literal(tok) for tok in flattened])

    expr.addParseAction(recordMatchedTokens, callDuringTry=True)
    return repeater
Example #6
Source File: exceptions.py From jbox with MIT License | 6 votes |
def _hash_comparison(self):
    """Return a human-readable comparison of actual vs. expected hashes.

    Builds one "Expected" line per allowed digest — the first prefixed
    with the hash name, the rest with "or" — followed by a "Got" line
    showing the digest actually computed.

    Example::

        Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
                     or 123451234512345123451234512345123451234512345
             Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
    """
    def hash_then_or(hash_name):
        # Yields the hash name once, then "or" forever, so only the
        # first expected value carries the algorithm label.
        # For now, all the decent hashes have 6-char names, so we can get
        # away with hard-coding space literals.
        return chain([hash_name], repeat(' or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend((' Expected %s %s' % (next(prefix), e))
                     for e in expecteds)
        lines.append(' Got %s\n' % self.gots[hash_name].hexdigest())
        # FIX: removed the original trailing ``prefix = ' or'`` — a dead
        # store: prefix is rebuilt at the top of every iteration and is
        # never read after the loop.
    return '\n'.join(lines)
Example #7
Source File: timeit.py From meddle with MIT License | 6 votes |
def main(args=None):
    """Main program, used when run as a script.

    The optional argument is the command line to parse, defaulting to
    sys.argv[1:].  The return value is an exit code to be passed to
    sys.exit(); it may be None to indicate success.

    When an exception happens during timing, a traceback is printed to
    stderr and the return value is 1.  Exceptions at other times
    (including during template compilation) are not caught.
    """
    if args is None:
        args = sys.argv[1:]
    import getopt
    try:
        opts, args = getopt.getopt(args, "n:s:r:tcvh",
                                   ["number=", "setup=", "repeat=",
                                    "time", "clock", "verbose", "help"])
    # FIX: the original used Python-2-only syntax (`except E, err:` and
    # `print err`), which is a SyntaxError on Python 3.  The forms below
    # are valid on both Python 2.6+ and Python 3.
    except getopt.error as err:
        print(err)
        print("use -h/--help for command line help")
        return 2
Example #8
Source File: in_memory_dataset.py From pytorch_geometric with MIT License | 6 votes |
def get(self, idx):
    """Reconstruct the ``idx``-th example from the collated storage.

    The dataset keeps all examples concatenated inside ``self.data``;
    ``self.slices[key]`` holds per-example offsets for each attribute
    ``key``, so this slices the idx-th piece back out.
    """
    data = self.data.__class__()

    # Per-graph node counts are stored separately when available.
    if hasattr(self.data, '__num_nodes__'):
        data.num_nodes = self.data.__num_nodes__[idx]

    for key in self.data.keys:
        item, slices = self.data[key], self.slices[key]
        # Offsets delimiting this example's chunk of the collated storage.
        start, end = slices[idx].item(), slices[idx + 1].item()
        # print(slices[idx], slices[idx + 1])
        if torch.is_tensor(item):
            # Slice only along the concatenation dimension for this key
            # (as reported by __cat_dim__); take everything else whole.
            s = list(repeat(slice(None), item.dim()))
            s[self.data.__cat_dim__(key, item)] = slice(start, end)
        elif start + 1 == end:
            # Single-element entry in a non-tensor sequence.
            # NOTE(review): this indexes `slices`, not `item` — verify
            # against the collate counterpart that this is intentional.
            s = slices[start]
        else:
            s = slice(start, end)
        data[key] = item[s]
    return data
Example #9
Source File: compat.py From razzy-spinner with GNU General Public License v3.0 | 6 votes |
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    If an element's count has been set to zero or is a negative number,
    elements() will ignore it.
    '''
    for elem, count in self.iteritems():
        # Count down instead of iterating repeat(None, count); a
        # non-positive count never enters the loop, matching the
        # "ignore zero/negative" contract.
        remaining = count
        while remaining > 0:
            yield elem
            remaining -= 1

# Override dict methods where the meaning changes for Counter
# objects.
Example #10
Source File: timeit.py From meddle with MIT License | 6 votes |
def repeat(self, repeat=default_repeat, number=default_number):
    """Call timeit() a few times.

    Convenience wrapper that invokes timeit() repeatedly and returns
    the list of results.  The first argument is how many times to call
    timeit() (default 3); the second is the 'number' argument for each
    call (default one million).

    Note: it's tempting to compute the mean and standard deviation of
    the result vector and report those, but it is not very useful.  In
    a typical case the lowest value gives a lower bound for how fast
    the machine can run the snippet; higher values are typically not
    caused by variability in Python's speed but by other processes
    interfering with timing accuracy.  So min() of the result is
    probably the only number of interest; after that, look at the whole
    vector and apply common sense rather than statistics.
    """
    return [self.timeit(number) for _ in range(repeat)]
Example #11
Source File: timeit.py From meddle with MIT License | 6 votes |
def timeit(self, number=default_number):
    """Time 'number' executions of the main statement.

    Runs the setup statement once, then returns the time it takes to
    execute the main statement `number` times (default one million) as
    a float measured in seconds.  The statements and the timer function
    are those passed to the constructor.
    """
    # itertools may be unavailable (e.g. during interpreter bootstrap);
    # fall back to a plain list of the right length.
    if itertools:
        it = itertools.repeat(None, number)
    else:
        it = [None] * number
    gcold = gc.isenabled()
    gc.disable()
    try:
        # FIX: the original did not restore the collector when inner()
        # raised, leaving GC disabled for the rest of the process; the
        # sibling implementation (Example #2) already guards with
        # try/finally.
        timing = self.inner(it, self.timer)
    finally:
        if gcold:
            gc.enable()
    return timing
Example #12
Source File: misc.py From verge3d-blender-addon with GNU General Public License v3.0 | 6 votes |
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.
    '''
    # One lazy repeat() stream per distinct element, chained together;
    # _repeat(elem, n) is empty for n <= 0, which is what makes
    # zero/negative counts disappear.
    return _chain.from_iterable(_repeat(element, multiplicity)
                                for element, multiplicity in self.items())

# Override dict methods where necessary
Example #13
Source File: exceptions.py From python-netsurv with MIT License | 6 votes |
def _hash_comparison(self):
    """Return a human-readable comparison of actual vs. expected hashes.

    Builds one "Expected" line per allowed digest — the first prefixed
    with the hash name, the rest with "or" — followed by a "Got" line
    showing the digest actually computed.

    Example::

        Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
                     or 123451234512345123451234512345123451234512345
             Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
    """
    def hash_then_or(hash_name):
        # Yields the hash name once, then "or" forever, so only the
        # first expected value carries the algorithm label.
        # For now, all the decent hashes have 6-char names, so we can get
        # away with hard-coding space literals.
        return chain([hash_name], repeat(' or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend((' Expected %s %s' % (next(prefix), e))
                     for e in expecteds)
        lines.append(' Got %s\n' % self.gots[hash_name].hexdigest())
        # FIX: removed the original trailing ``prefix = ' or'`` — a dead
        # store: prefix is rebuilt at the top of every iteration and is
        # never read after the loop.
    return '\n'.join(lines)
Example #14
Source File: exceptions.py From recruit with Apache License 2.0 | 6 votes |
def _hash_comparison(self):
    """Return a human-readable comparison of actual vs. expected hashes.

    Builds one "Expected" line per allowed digest — the first prefixed
    with the hash name, the rest with "or" — followed by a "Got" line
    showing the digest actually computed.

    Example::

        Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
                     or 123451234512345123451234512345123451234512345
             Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
    """
    def hash_then_or(hash_name):
        # Yields the hash name once, then "or" forever, so only the
        # first expected value carries the algorithm label.
        # For now, all the decent hashes have 6-char names, so we can get
        # away with hard-coding space literals.
        return chain([hash_name], repeat(' or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend((' Expected %s %s' % (next(prefix), e))
                     for e in expecteds)
        lines.append(' Got %s\n' % self.gots[hash_name].hexdigest())
        # FIX: removed the original trailing ``prefix = ' or'`` — a dead
        # store: prefix is rebuilt at the top of every iteration and is
        # never read after the loop.
    return '\n'.join(lines)
Example #15
Source File: test_block_internals.py From recruit with Apache License 2.0 | 6 votes |
def test_constructor_compound_dtypes(self):
    # GH 5191
    # Constructing a DataFrame with a compound (structured) dtype should
    # raise NotImplementedError rather than silently misbehaving.
    def f(dtype):
        # Nine identical (datetime, str, int) rows exercise the dtype path.
        data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9))
        return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype)

    # Compound dtype: expected to raise.
    pytest.raises(NotImplementedError, f,
                  [("A", "datetime64[h]"),
                   ("B", "str"),
                   ("C", "int32")])

    # these work (though results may be unexpected)
    f('int64')
    f('float64')

    # GH 10822
    # invalid error message on dt inference; skipped on Windows.
    if not compat.is_platform_windows():
        f('M8[ns]')
Example #16
Source File: __init__.py From benchexec with Apache License 2.0 | 6 votes |
def load_results(
    result_files,
    options,
    run_set_id=None,
    columns=None,
    columns_relevant_for_diff=None,
):
    """Version of load_result for multiple input files that will be loaded
    concurrently.

    Each result file is paired with the same options/run_set_id/columns
    values via itertools.repeat so parallel.map can fan the work out
    one call per file.
    """
    # FIX: the original declared a mutable default argument
    # (``columns_relevant_for_diff=set()``), a single set shared across
    # all calls.  Callers that pass a value are unaffected; callers that
    # omit it now get a fresh empty set per call.
    if columns_relevant_for_diff is None:
        columns_relevant_for_diff = set()
    return parallel.map(
        load_result,
        result_files,
        itertools.repeat(options),
        itertools.repeat(run_set_id),
        itertools.repeat(columns),
        itertools.repeat(columns_relevant_for_diff),
    )
Example #17
Source File: misc.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.
    '''
    # One lazy repeat() stream per distinct element, chained together;
    # _repeat(elem, n) is empty for n <= 0, which is what makes
    # zero/negative counts disappear.
    return _chain.from_iterable(_repeat(element, multiplicity)
                                for element, multiplicity in self.items())

# Override dict methods where necessary
Example #18
Source File: misc.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.
    '''
    # One lazy repeat() stream per distinct element, chained together;
    # _repeat(elem, n) is empty for n <= 0, which is what makes
    # zero/negative counts disappear.
    return _chain.from_iterable(_repeat(element, multiplicity)
                                for element, multiplicity in self.items())

# Override dict methods where necessary
Example #19
Source File: quota.py From mars with Apache License 2.0 | 6 votes |
def alter_allocations(self, keys, quota_sizes=None, handle_shrink=True,
                      new_keys=None, allocate=False, process_quota=False):
    """
    Alter multiple requests

    :param keys: keys to update
    :param quota_sizes: new quota sizes, if None, no changes will be made
    :param handle_shrink: if True and the quota size less than the original,
        process requests in the queue
    :param new_keys: new allocation keys to replace current keys, if None,
        no changes will be made
    :param allocate: if True, will allocate resources for new items
    :param process_quota: call process_quotas() after allocated
    :return:
    """
    # Missing size/key sequences degrade to endless "no change" markers.
    sizes = quota_sizes if quota_sizes else itertools.repeat(None)
    replacements = new_keys if new_keys else itertools.repeat(None)

    # Apply every alteration first; a shrink anywhere means the pending
    # request queue may need processing once at the end.
    shrunk_flags = [
        self.alter_allocation(key, size, handle_shrink=False,
                              new_key=replacement, allocate=allocate,
                              process_quota=process_quota)
        for key, size, replacement in zip(keys, sizes, replacements)
    ]
    if handle_shrink and any(shrunk_flags):
        self._process_requests()
Example #20
Source File: core.py From jbox with MIT License | 6 votes |
def populate_obj(self, obj, name):
    """Write this field list's entry data onto ``obj.<name>``.

    Existing values on the object are consumed pairwise with the
    entries, padded with None when the object has fewer values than
    there are entries.  Each entry populates a throwaway holder object
    so only the final list is assigned back to the target attribute.
    """
    current = getattr(obj, name, None)
    try:
        value_iter = iter(current)
    except TypeError:
        value_iter = iter([])
    # Endless None padding guarantees a candidate value per entry.
    candidates = itertools.chain(value_iter, itertools.repeat(None))

    _fake = type(str('_fake'), (object, ), {})
    results = []
    for entry, value in izip(self.entries, candidates):
        holder = _fake()
        holder.data = value
        entry.populate_obj(holder, 'data')
        results.append(holder.data)
    setattr(obj, name, results)
Example #21
Source File: table_sheet.py From openpyxl-templates with MIT License | 6 votes |
def object_from_row(self, row, row_number,
                    exception_policy=TableSheetExceptionPolicy.RaiseCellException):
    """Convert one worksheet row into an object via this sheet's columns.

    The row is padded with None so every declared column receives a
    cell.  Depending on the exception policy, cell-level failures are
    either raised immediately or collected and raised together at the
    end as a CellExceptions group.
    """
    values = OrderedDict()
    collected_errors = []
    padded_cells = chain(row, repeat(None))
    for cell, column in zip(padded_cells, self.columns):
        try:
            values[column.object_attribute] = column._from_excel(cell)
        except CellException as e:
            if exception_policy.value <= TableSheetExceptionPolicy.RaiseCellException.value:
                raise e
            collected_errors.append(e)

    if collected_errors:
        raise CellExceptions(collected_errors)

    # return self.row_class(**data)
    return self.create_object(row_number, **values)
Example #22
Source File: test_isoparser.py From plugin.video.emby with GNU General Public License v3.0 | 6 votes |
def __make_date_examples():
    """Build (date, strftime-format) pairs used as ISO-parser fixtures."""
    month_only_dates = [
        date(1999, 12, 1),
        date(2016, 2, 1)
    ]
    if six.PY3:
        # strftime does not support dates before 1900 in Python 2
        month_only_dates.append(date(1000, 11, 1))

    # Only one supported format for dates with no day
    examples = zip(month_only_dates, it.repeat('%Y-%m'))

    full_dates = [
        date(1969, 12, 31),
        date(1900, 1, 1),
        date(2016, 2, 29),
        date(2017, 11, 14)
    ]
    full_date_formats = ('%Y%m%d', '%Y-%m-%d')
    examples = it.chain(examples, it.product(full_dates, full_date_formats))

    return list(examples)
Example #23
Source File: screens.py From TerminalView with MIT License | 6 votes |
def delete_lines(self, count=None):
    """Deletes the indicated # of lines, starting at line with cursor.

    As lines are deleted, lines displayed below cursor move up.  Lines
    added to bottom of screen have spaces with same character
    attributes as last line moved up.

    :param int count: number of lines to delete.
    """
    count = count or 1
    top, bottom = self.margins

    # Outside the scrolling margins the whole operation is a no-op.
    if not (top <= self.cursor.y <= bottom):
        return

    # +1 so the bottom-margin line itself counts as deletable.
    deletable = min(bottom - self.cursor.y + 1, count)
    for _ in range(deletable):
        self.buffer.pop(self.cursor.y)
        # Backfill at the bottom margin with blank cells carrying the
        # cursor's current attributes.
        self.buffer.insert(bottom, list(
            repeat(self.cursor.attrs, self.columns)))
    self.carriage_return()
Example #24
Source File: extract.py From code2vec with MIT License | 6 votes |
def ExtractFeaturesForDirsList(args, dirs):
    """Extract features for every directory in `dirs` using a worker pool.

    Workers write their output into a per-process temp dir, whose
    contents are then streamed to stdout; the temp dir is removed
    afterwards regardless of success.
    """
    global TMP_DIR
    TMP_DIR = "./tmp/feature_extractor%d/" % (os.getpid())
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR, ignore_errors=True)
    os.makedirs(TMP_DIR)
    try:
        p = multiprocessing.Pool(4)
        try:
            p.starmap(ParallelExtractDir, zip(itertools.repeat(args), dirs))
        finally:
            # FIX: the original never closed the pool, leaking the four
            # worker processes until interpreter exit.
            p.close()
            p.join()
        #for dir in dirs:
        #    ExtractFeaturesForDir(args, dir, '')
        output_files = os.listdir(TMP_DIR)
        for f in output_files:
            # NOTE(review): file names are interpolated into a shell
            # command; acceptable for the generated names here, but not
            # safe for arbitrary input.
            os.system("cat %s/%s" % (TMP_DIR, f))
    finally:
        shutil.rmtree(TMP_DIR, ignore_errors=True)
Example #25
Source File: extract.py From code2vec with MIT License | 6 votes |
def ExtractFeaturesForDirsList(args, dirs):
    """Extract features for every directory in `dirs` using a worker pool.

    Workers write their output into a per-process temp dir, whose
    contents are then streamed to stdout; the temp dir is removed
    afterwards regardless of success.
    """
    global TMP_DIR
    TMP_DIR = "./tmp/feature_extractor%d/" % (os.getpid())
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR, ignore_errors=True)
    os.makedirs(TMP_DIR)
    try:
        p = multiprocessing.Pool(4)
        try:
            p.starmap(ParallelExtractDir, zip(itertools.repeat(args), dirs))
        finally:
            # FIX: the original never closed the pool, leaking the four
            # worker processes until interpreter exit.
            p.close()
            p.join()
        #for dir in dirs:
        #    ExtractFeaturesForDir(args, dir, '')
        output_files = os.listdir(TMP_DIR)
        for f in output_files:
            # NOTE(review): file names are interpolated into a shell
            # command; acceptable for the generated names here, but not
            # safe for arbitrary input.
            os.system("cat %s/%s" % (TMP_DIR, f))
    finally:
        shutil.rmtree(TMP_DIR, ignore_errors=True)
Example #26
Source File: datastructures.py From jbox with MIT License | 5 votes |
def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance
Example #27
Source File: utils.py From mars with Apache License 2.0 | 5 votes |
def create_fetch_tensor(chunk_size, shape, dtype, tensor_key=None, tensor_id=None,
                        chunk_keys=None):
    """
    Construct Fetch tensor on the fly, using given chunk_size,
    shape, dtype, as well as possible tensor_key, tensor_id and chunk keys.
    """
    from ..config import options
    from .fetch import TensorFetch

    # Normalize dtype so that itemsize is available for chunk sizing.
    if not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)

    if chunk_keys is None:
        # No predefined keys: zip() below pairs every chunk with None.
        chunk_keys = itertools.repeat(None)

    # compute chunks
    chunk_size = chunk_size or options.chunk_size
    chunk_size = decide_chunk_sizes(shape, chunk_size, dtype.itemsize)
    chunk_size_idxes = (range(len(size)) for size in chunk_size)

    fetch_op = TensorFetch(dtype=dtype).reset_key()

    chunks = []
    # Cartesian products over per-dimension splits enumerate every chunk's
    # shape and index in lock-step with its (optional) predefined key.
    for chunk_shape, chunk_idx, chunk_key in zip(itertools.product(*chunk_size),
                                                 itertools.product(*chunk_size_idxes),
                                                 chunk_keys):
        chunk = fetch_op.copy().reset_key().new_chunk(
            None, shape=chunk_shape, index=chunk_idx, _key=chunk_key,
            hex=uuid.uuid4().hex)
        chunks.append(chunk)

    return fetch_op.copy().new_tensor(None, shape=shape, dtype=dtype,
                                      nsplits=chunk_size, chunks=chunks,
                                      _key=tensor_key, _id=tensor_id,
                                      hex=uuid.uuid4().hex)
Example #28
Source File: pyparsing.py From jbox with MIT License | 5 votes |
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal
    value.  Especially useful when used with
    C{L{transformString<ParserElement.transformString>}()}.
    """
    # next() bound over an endless repeat() hands back the very same
    # one-element list on every invocation, however the parse action
    # machinery chooses to call it.
    replacement_stream = itertools.repeat([replStr])
    return functools.partial(next, replacement_stream)
Example #29
Source File: planetoid.py From pytorch_geometric with MIT License | 5 votes |
def edge_index_from_dict(graph_dict, num_nodes=None):
    """Build a (2, num_edges) edge_index tensor from an adjacency dict.

    Each key maps to a list of neighbor indices; the key is repeated
    once per neighbor to form the source row.
    """
    sources, targets = [], []
    for node, neighbors in graph_dict.items():
        sources.extend(repeat(node, len(neighbors)))
        targets.extend(neighbors)
    edge_index = torch.stack(
        [torch.tensor(sources), torch.tensor(targets)], dim=0)

    # NOTE: There are duplicated edges and self loops in the datasets. Other
    # implementations do not remove them!
    edge_index, _ = remove_self_loops(edge_index)
    edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)

    return edge_index
Example #30
Source File: pyparsing.py From jbox with MIT License | 5 votes |
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, it will also match the leading C{"1:1"} in
    C{"1:10"}; if this is not desired, use C{matchPreviousExpr}.
    Do *not* use with packrat parsing enabled.
    """
    repeater = Forward()

    def recordMatchedTokens(s, l, t):
        # Rebind the repeater to literally match whatever was just seen.
        if not t:
            repeater << Empty()
        elif len(t) == 1:
            repeater << t[0]
        else:
            # flatten nested token lists before rebuilding the literals
            flattened = _flatten(t.asList())
            repeater << And(Literal(tok) for tok in flattened)

    expr.addParseAction(recordMatchedTokens, callDuringTry=True)
    repeater.setName('(prev) ' + _ustr(expr))
    return repeater