Python six.moves.range() Examples

The following are 28 code examples of six.moves.range(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module six.moves, or try the search function.
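For reference, a minimal sketch of the import in use: on Python 2, six.moves.range resolves to the lazy built-in xrange, while on Python 3 it is simply the built-in range, so iteration never materializes a full list on either version.

from six.moves import range

# Lazy iteration on both Python 2 (xrange) and Python 3 (range).
squares = [i * i for i in range(5)]
print(squares)  # [0, 1, 4, 9, 16]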
Example #1
Source File: config.py    From linter-pylama with MIT License
def register_options_provider(self, provider, own_group=True):
        """register an options provider"""
        assert provider.priority <= 0, "provider's priority can't be > 0"
        for i in range(len(self.options_providers)):
            if provider.priority > self.options_providers[i].priority:
                self.options_providers.insert(i, provider)
                break
        else:
            self.options_providers.append(provider)
        non_group_spec_options = [option for option in provider.options
                                  if 'group' not in option[1]]
        groups = getattr(provider, 'option_groups', ())
        if own_group and non_group_spec_options:
            self.add_option_group(provider.name.upper(), provider.__doc__,
                                  non_group_spec_options, provider)
        else:
            for opt, optdict in non_group_spec_options:
                self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
        for gname, gdoc in groups:
            gname = gname.upper()
            goptions = [option for option in provider.options
                        if option[1].get('group', '').upper() == gname]
            self.add_option_group(gname, gdoc, goptions, provider) 
Example #2
Source File: simulation_ucb1.py    From striatum with BSD 2-Clause "Simplified" License
def main():
    context_dimension = 5
    action_storage = MemoryActionStorage()
    action_storage.add([Action(i) for i in range(5)])

    # Regret Analysis
    n_rounds = 10000
    context, desired_actions = simulation.simulate_data(
        n_rounds, context_dimension, action_storage, random_state=1)
    policy = UCB1(MemoryHistoryStorage(), MemoryModelStorage(),
                  action_storage)

    for t in range(n_rounds):
        history_id, recommendation = policy.get_action(context[t])
        action_id = recommendation.action.id
        if desired_actions[t] != action_id:
            policy.reward(history_id, {action_id: 0})
        else:
            policy.reward(history_id, {action_id: 1})

    policy.plot_avg_regret()
    plt.show() 
Example #3
Source File: util.py    From pwnypack with MIT License
def deBruijn(n, k):
    """
    An implementation of the FKM algorithm for generating the de Bruijn
    sequence containing all k-ary strings of length n, as described in
    "Combinatorial Generation" by Frank Ruskey.
    """

    a = [0] * (n + 1)

    def gen(t, p):
        if t > n:
            for v in a[1:p + 1]:
                yield v
        else:
            a[t] = a[t - p]
         
            for v in gen(t + 1, p):
                yield v
         
            for j in range(a[t - p] + 1, k):
                a[t] = j
                for v in gen(t + 1, t):
                    yield v

    return gen(1, 1) 
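A usage sketch for the generator above: concatenating its output for n=3, k=2 yields the binary de Bruijn sequence of order 3, in which every 3-bit string occurs exactly once when the sequence is read cyclically.

seq = ''.join(str(v) for v in deBruijn(3, 2))
print(seq)  # '00010111'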
Example #4
Source File: codec.py    From pwnypack with MIT License
def caesar_app(parser, cmd, args):  # pragma: no cover
    """
    Caesar crypt a value with a key.
    """

    parser.add_argument('shift', type=int, help='the shift to apply')
    parser.add_argument('value', help='the value to caesar crypt, read from stdin if omitted', nargs='?')
    parser.add_argument(
        '-s', '--shift-range',
        dest='shift_ranges',
        action='append',
        help='specify a character range to shift (defaults to a-z, A-Z)'
    )

    args = parser.parse_args(args)
    if not args.shift_ranges:
        args.shift_ranges = ['az', 'AZ']

    return caesar(args.shift, pwnypack.main.string_value_or_stdin(args.value), args.shift_ranges) 
Example #5
Source File: voicebox.py    From pt-voicebox with MIT License
def add_voice(self):
        new_voice = Voice({})     # creates new voice with no name and empty tree of corpora
        texts = os.listdir('texts')
        add_another_corpus = ''
        while add_another_corpus != 'n':
            for i in range(len(texts)):
                print("%s %s" % (i + 1, texts[i]))
            choice = input('Enter the number of the corpus you want to load:\n')
            corpus_name = texts[int(choice) - 1]
            path = 'texts/%s' % corpus_name
            with open(path, 'r') as f:
                text = f.read()
            corpus_weight_prompt = 'Enter the weight for %s:\n' % corpus_name
            corpus_weight = float(input(corpus_weight_prompt))
            new_voice.add_corpus(Corpus(text, corpus_name), corpus_weight)
            texts.remove(corpus_name)
            add_another_corpus = input('Add another corpus to this voice? y/n\n')
        voicename = input('Name this voice:\n')
        new_voice.name = voicename
        new_voice.normalize_weights()
        self.voices[voicename] = new_voice

    # asks user to specify a transcript and number of characters, and makes separate voices for that number of
    # the most represented characters in the transcript 
Example #6
Source File: voicebox.py    From pt-voicebox with MIT License
def load_voices_from_transcript(self):
        transcripts = os.listdir('texts/transcripts')
        for i in range(len(transcripts)):
            print("%s %s" % (i + 1, transcripts[i]))
        choice = input('Enter the number of the transcript you want to load:\n')
        transcript_name = transcripts[int(choice) - 1]
        number = int(input('Enter the number of voices to load:\n'))
        for charname, size in self.biggest_characters(transcript_name, number):
            print(charname)
            path = 'texts/transcripts/%s/%s' % (transcript_name, charname)
            with open(path) as f:
                source_text = f.read()
            corpus_name = charname
            weighted_corpora = {}
            weighted_corpora[charname] = [Corpus(source_text, corpus_name), 1]
            self.voices[charname] = Voice(weighted_corpora, charname)

    # retrieves a list of the top 20 largest character text files in a transcript folder 
Example #7
Source File: main.py    From tensorflow-XNN with MIT License
def get_subword_for_word(word, n1=3, n2=6, include_self=False):
    """only extract the prefix and suffix"""
    z = []
    if len(word) >= n1:
        word = "*" + word + "*"
        l = len(word)
        n1 = min(n1, l)
        n2 = min(n2, l)
        # bind method outside of loop to reduce overhead
        # https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/feature_extraction/text.py#L144
        z_append = z.append
        if include_self:
            z_append(word)
        for k in range(n1, n2 + 1):
            z_append(word[:k])
            z_append(word[-k:])
    return z


# 564 µs ± 14.9 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) 
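A hypothetical call illustrating the output of get_subword_for_word: only the '*'-padded prefixes and suffixes of lengths n1 through n2 are returned.

get_subword_for_word('apple')
# -> ['*ap', 'le*', '*app', 'ple*', '*appl', 'pple*', '*apple', 'apple*']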
Example #8
Source File: rewardplot.py    From striatum with BSD 2-Clause "Simplified" License
def calculate_cum_reward(policy):
    """Calculate cumulative reward with respect to time.

    Parameters
    ----------
    policy: bandit object
        The bandit algorithm you want to evaluate.

    Returns
    -------
    cum_reward: dict
        The dict stores {history_id: cumulative reward}.

    cum_n_actions: dict
        The dict stores
        {history_id: cumulative number of recommended actions}.
    """
    cum_reward = {-1: 0.0}
    cum_n_actions = {-1: 0}
    for i in range(policy.history_storage.n_histories):
        reward = policy.history_storage.get_history(i).rewards
        cum_n_actions[i] = cum_n_actions[i - 1] + len(reward)
        cum_reward[i] = cum_reward[i - 1] + sum(six.viewvalues(reward))
    return cum_reward, cum_n_actions 
Example #9
Source File: parallel.py    From dataflow with Apache License 2.0
def __init__(self, get_df, num_prefetch, num_thread):
        """
        Args:
            get_df ( -> DataFlow): a callable which returns a DataFlow.
                Each thread will call this function to get the DataFlow to use.
                Therefore do not return the same DataFlow object for each call,
                unless your dataflow is stateless.
            num_prefetch (int): size of the queue
            num_thread (int): number of threads
        """
        assert num_thread > 0, num_thread
        assert num_prefetch > 0, num_prefetch
        self.num_thread = num_thread
        self.queue = queue.Queue(maxsize=num_prefetch)
        self.threads = [
            MultiThreadRunner._Worker(get_df, self.queue)
            for _ in range(num_thread)]

        try:
            self._size = self.__len__()
        except NotImplementedError:
            self._size = -1 
Example #10
Source File: generate_to_file.py    From mathematics_dataset with Apache License 2.0
def main(unused_argv):
  generate.init_modules(FLAGS.train_split)

  output_dir = os.path.expanduser(FLAGS.output_dir)
  if os.path.exists(output_dir):
    logging.fatal('output dir %s already exists', output_dir)
  logging.info('Writing to %s', output_dir)
  os.makedirs(output_dir)

  for regime, flat_modules in six.iteritems(generate.filtered_modules):
    regime_dir = os.path.join(output_dir, regime)
    os.mkdir(regime_dir)
    per_module = generate.counts[regime]
    for module_name, module in six.iteritems(flat_modules):
      path = os.path.join(regime_dir, module_name + '.txt')
      with open(path, 'w') as text_file:
        for _ in range(per_module):
          problem, _ = generate.sample_from_module(module)
          text_file.write(str(problem.question) + '\n')
          text_file.write(str(problem.answer) + '\n')
      logging.info('Written %s', path) 
Example #11
Source File: bleu_hook.py    From fine-lm with MIT License
def _get_ngrams(segment, max_order):
  """Extracts all n-grams up to a given maximum order from an input segment.

  Args:
    segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.

  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
  """
  ngram_counts = collections.Counter()
  for order in range(1, max_order + 1):
    for i in range(0, len(segment) - order + 1):
      ngram = tuple(segment[i:i + order])
      ngram_counts[ngram] += 1
  return ngram_counts 
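A hypothetical call illustrating the returned Counter:

tokens = 'to be or not to be'.split()
counts = _get_ngrams(tokens, max_order=2)
# counts[('to',)] == 2, counts[('to', 'be')] == 2, counts[('or', 'not')] == 1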
Example #12
Source File: calculus.py    From mathematics_dataset with Apache License 2.0
def _sample_integrand(coefficients, derivative_order, derivative_axis, entropy):
  """Integrates `coefficients` and adds sampled "constant" terms."""
  coefficients = np.asarray(coefficients)

  # Integrate (with zero for constant terms).
  integrand = coefficients
  for _ in range(derivative_order):
    integrand = polynomials.integrate(integrand, derivative_axis)

  # Add on sampled constant terms.
  constant_degrees = np.array(integrand.shape) - 1
  constant_degrees[derivative_axis] = derivative_order - 1
  extra_coeffs = polynomials.sample_coefficients(constant_degrees, entropy)
  pad_amount = coefficients.shape[derivative_axis]
  pad = [(0, pad_amount if i == derivative_axis else 0)
         for i in range(coefficients.ndim)]
  extra_coeffs = np.pad(extra_coeffs, pad, 'constant', constant_values=0)
  return integrand + extra_coeffs 
Example #13
Source File: visualization.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def draw_bounding_boxes(image, gt_boxes, im_info):
  num_boxes = gt_boxes.shape[0]
  gt_boxes_new = gt_boxes.copy()
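  # im_info[2] appears to hold the image scale factor; dividing maps the boxes back to original image coordinates.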
  gt_boxes_new[:,:4] = np.round(gt_boxes_new[:,:4].copy() / im_info[2])
  disp_image = Image.fromarray(np.uint8(image[0]))

  for i in range(num_boxes):
    this_class = int(gt_boxes_new[i, 4])
    disp_image = _draw_single_box(disp_image,
                                  gt_boxes_new[i, 0],
                                  gt_boxes_new[i, 1],
                                  gt_boxes_new[i, 2],
                                  gt_boxes_new[i, 3],
                                  'N%02d-C%02d' % (i, this_class),
                                  FONT,
                                  color=STANDARD_COLORS[this_class % NUM_COLORS])

  image[0, :] = np.array(disp_image)
  return image 
Example #14
Source File: comparison.py    From mathematics_dataset with Apache License 2.0
def _unique_values(entropy, only_integers=False, count=None):
  """Generates unique values."""
  if count is None:
    count = random.randint(*_sort_count_range(entropy))

  if only_integers:
    sampler = functools.partial(number.integer, signed=True)
  else:
    sampler = integer_or_rational_or_decimal

  for _ in range(1000):
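    # Rejection sampling: split the entropy budget with a Dirichlet draw, then retry until all sampled values are distinct.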
    entropies = entropy * np.random.dirichlet(np.ones(count))
    entropies = np.maximum(1, entropies)
    values = [sampler(ent) for ent in entropies]
    if len(sympy.FiniteSet(*values)) == len(values):
      return values
  raise ValueError('Could not generate {} unique values with entropy={}'
                   .format(count, entropy)) 
Example #15
Source File: probability.py    From mathematics_dataset with Apache License 2.0
def _sequence_event(values, length, verb):
  """Returns sequence (finite product) event.

  Args:
    values: List of values to sample from.
    length: Length of the sequence to generate.
    verb: Verb in infinitive form.

  Returns:
    Instance of `probability.FiniteProductEvent`, together with a text
    description.
  """
  del verb  # unused
  samples = [random.choice(values) for _ in range(length)]
  events = [probability.DiscreteEvent([sample]) for sample in samples]
  event = probability.FiniteProductEvent(events)
  sequence = ''.join(str(sample) for sample in samples)
  event_description = 'sequence {sequence}'.format(sequence=sequence)
  return event, event_description 
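A hypothetical call (the probability helpers come from the surrounding mathematics_dataset module):

event, description = _sequence_event(values=['H', 'T'], length=3, verb='flip')
# description might be, e.g., 'sequence HTH'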
Example #16
Source File: numbers.py    From mathematics_dataset with Apache License 2.0
def _random_coprime_pair(entropy):
  """Returns a pair of random coprime integers."""
  coprime_product = number.integer(entropy, False, min_abs=1)
  factors = sympy.factorint(coprime_product)
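  # Each take() removes an entire prime power from factors, so the two products below never share a prime factor.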
  def take():
    prime = random.choice(list(factors.keys()))
    power = factors[prime]
    del factors[prime]
    return prime ** power

  if random.random() < 0.8 and len(factors) >= 2:
    # Disallow trivial factoring where possible.
    count_left = random.randint(1, len(factors) - 1)
    count_right = len(factors) - count_left
  else:
    count_left = random.randint(0, len(factors))
    count_right = len(factors) - count_left

  left = sympy.prod([take() for _ in range(count_left)])
  right = sympy.prod([take() for _ in range(count_right)])
  assert left * right == coprime_product
  return left, right


# @composition.module(number.is_positive_integer) 
Example #17
Source File: main.py    From tensorflow-XNN with MIT License
def get_subword_for_word_all0(word, n1=3, n2=6):
    z = []
    z_append = z.append
    word = "*" + word + "*"
    l = len(word)
    z_append(word)
    if l > n1:
        n2 = min(n2, l - 1)
        for i in range(l - n1 + 1):
            for k in range(n1, n2 + 1):
                if 2 * i + n2 < l:
                    z_append(word[i:(i + k)])
                    if i == 0:
                        z_append(word[-(i + k + 1):])
                    else:
                        z_append(word[-(i + k + 1):-i])
                else:
                    if 2 * i + k < l:
                        z_append(word[i:(i + k)])
                        z_append(word[-(i + k + 1):-i])
                    elif 2 * (i - 1) + n2 < l:
                        z_append(word[i:(i + k)])
    return z


# 3.44 µs ± 101 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) 
Example #18
Source File: corpus.py    From pt-voicebox with MIT License
def make_tree(self):
        """constructs the tree of ngrams' likelihood of following other ngrams"""
        sentences = self.get_sentences()
        white_list = self.get_white_list(sentences)

        # go through each sentence, add each word to the dictionary, incrementing length each time
        for sentence in sentences:
            sentence = ['START_SENTENCE'] + sentence
            for ngram_size in range(0, self.max_ngram_size+1):
                for start in range(0, len(sentence)):
                    end = start + ngram_size
                    if end <= len(sentence):
                        words_to_add = sentence[start:end]
                        # checks that all of the words in the ngram pass the criterion
                        if set(words_to_add) < white_list and len(words_to_add) > 0:
                            new_ngram = " ".join(words_to_add)
                            self.add_ngram(new_ngram)
                            if ngram_size == 1:
                                self.wordcount += 1

                            # add dictionaries of words following this ngram
                            for word_position in range(end, end+self.hindsight):
                                if word_position < len(sentence):
                                    reach = word_position - end
                                    target = self.tree[new_ngram].after[reach]
                                    word = sentence[word_position]
                                    if word in white_list:
                                        self.add_ngram(word, target)

                            # add dictionaries of words preceding this ngram
                            for word_position in range(start-1, start-self.foresight-1, -1):
                                if word_position >= 0:
                                    reach = start - word_position
                                    target = self.tree[new_ngram].before[reach-1]
                                    word = sentence[word_position]
                                    if word in white_list:
                                        self.add_ngram(word, target)

        self.calculate_frequencies()
        self.calculate_sig_scores() 
Example #19
Source File: voicebox.py    From pt-voicebox with MIT License
def load_session(self):
        sessions = os.listdir('saved')
        for i in range(len(sessions)):
            print("%s %s" % (i + 1, sessions[i]))
        choice = input('Enter the number of the session you want to load:\n')
        session_name = sessions[int(choice) - 1]
        path = 'saved/%s' % session_name
        return loadobject(path)

    # given a chosen word and a tree of scores assigned to it by different sources, updates the weights of those sources
    # according to whether they exceeded or fell short of their expected contribution to the suggestion 
Example #20
Source File: voicebox.py    From pt-voicebox with MIT License
def choose_voice(self):
        voice_keys = sorted(self.voices.keys())
        print("VOICES:")
        for i in range(len(voice_keys)):
            print("%s: %s" % (i + 1, voice_keys[i]))
        choice = input('Choose a voice by entering a number...\n')
        self.active_voice = self.voices[voice_keys[int(choice) - 1]]
        return self.active_voice 
Example #21
Source File: utils.py    From loaner with Apache License 2.0
def write_break():
  """Writes a line break followed by a line of '-' and two more line breaks."""
  write('')
  write(''.join('-' for _ in range(flags.get_help_width())))
  write('') 
Example #22
Source File: main.py    From tensorflow-XNN with MIT License
def get_subword_for_word_all(word, n1=3, n2=6):
    z = []
    z_append = z.append
    word = "*" + word + "*"
    l = len(word)
    z_append(word)
    for k in range(n1, n2 + 1):
        for i in range(l - k + 1):
            z_append(word[i:i + k])
    return z 
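A hypothetical call: every '*'-padded substring of length n1 through n2 is emitted, plus the padded word itself; note that duplicates are not removed.

get_subword_for_word_all('cat')
# -> ['*cat*', '*ca', 'cat', 'at*', '*cat', 'cat*', '*cat*']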
Example #23
Source File: graph.py    From link-prediction_with_deep-learning with MIT License
def build_deepwalk_corpus_iter(G, num_paths, path_length, alpha=0,
                      rand=random.Random(0)):

  nodes = list(G.nodes())

  for cnt in range(num_paths):
    rand.shuffle(nodes)
    for node in nodes:
      yield G.random_walk(path_length, rand=rand, alpha=alpha, start=node) 
Example #24
Source File: graph.py    From link-prediction_with_deep-learning with MIT License
def build_deepwalk_corpus(G, num_paths, path_length, alpha=0,
                      rand=random.Random(0)):
  walks = []

  nodes = list(G.nodes())
  
  for cnt in range(num_paths):
    rand.shuffle(nodes)
    for node in nodes:
      walks.append(G.random_walk(path_length, rand=rand, alpha=alpha, start=node))
  
  return walks 
Example #25
Source File: graph.py    From link-prediction_with_deep-learning with MIT License
def clique(size):
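    # Each permutation row appears to be read by from_adjlist as (node, *neighbors), making every node adjacent to all others.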
    return from_adjlist(permutations(range(1, size + 1)))


# http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python 
Example #26
Source File: find_names_brute.py    From gransk with Apache License 2.0
def _extract(self, text):
    if not text:
      return
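    # Ring buffer holding the four most recent tokens and their (start, end) spans.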
    token_buffer = [('', (-1, -1))] * 4

    for index, token in enumerate(self.tokenizer.finditer(text)):
      relative_index = index % len(token_buffer)
      token_buffer[relative_index] = (
          token.group(), (token.start(), token.end()))

      probability = 0
      prev_token_start = -1

      for token_distance in range(len(token_buffer)):
        buffer_index = (relative_index - token_distance) % len(token_buffer)
        buffered_token, (token_start, token_end) = token_buffer[buffer_index]
        is_lowercase = buffered_token == buffered_token.lower()
        token_probability = self.model.get(buffered_token.lower(), 0)

        if is_lowercase or not token_probability:
          break

        if token_distance > 0:
          gap = text[token_end:prev_token_start]

          if self.stopper.search(gap):
            break

          token_probability *= math.sqrt(float(1) / len(gap))

        prev_token_start = token_start
        probability += token_probability
        avg_probability = (probability / (token_distance + 1))
        score = avg_probability * self.size_probability[token_distance + 1]

        if score >= self.threshold:
          yield token_start, text[token_start:token.end()]

        if token_distance > 0 and self.semistop.search(gap):
          break 
Example #27
Source File: diskimage_reader.py    From gransk with Apache License 2.0
def _GetVSSStoreIdentifiers(self, scan_node):
    if not scan_node or not scan_node.path_spec:
      raise RuntimeError('Invalid scan node.')

    volume_system = vshadow_volume_system.VShadowVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)

    if not volume_identifiers:
      return []

    return list(range(1, volume_system.number_of_volumes + 1)) 
Example #28
Source File: http.py    From threat_intel with MIT License
def _cull(self):
        """Remove calls more than 1 second old from the queue."""
        right_now = time.time()

        cull_from = -1
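        # -1 means nothing to cull yet; it becomes the highest index whose call record is at least one second old.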
        for index in range(len(self._call_times)):
            if right_now - self._call_times[index].time >= 1.0:
                cull_from = index
                self._outstanding_calls -= self._call_times[index].num_calls
            else:
                break

        if cull_from > -1:
            self._call_times = self._call_times[cull_from + 1:]