Python six.next() Examples

The following are 30 code examples of six.next(), drawn from open source projects. The header above each example names the original project, source file, and license. You may also want to check out the other available functions and classes of the six module.
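six.next(it) is an alias of six.advance_iterator: on Python 2 it calls it.next(), and on Python 3 it is simply the builtin next(). A minimal sketch of the behavior:

import six

numbers = iter([1, 2, 3])
print(six.next(numbers))  # 1
print(six.next(numbers))  # 2
print(six.next(numbers))  # 3
# One more six.next(numbers) raises StopIteration, exactly like the
# builtin next().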
Example #1
Source File: tokenizer.py    From koala with GNU General Public License v3.0
def __str__(self):
        return self.tvalue


#========================================================================
#       Class: f_tokens
# Description: An ordered list of tokens
#
#  Attributes:        items - Ordered list
#                     index - Current position in the list
#
#     Methods: f_tokens     - __init__()
#              f_token      - add()      - Add a token to the end of the list
#              None         - addRef()   - Add an existing token to the end of the list
#              None         - reset()    - reset the index to -1
#              Boolean      - BOF()      - Beginning of list?
#              Boolean      - EOF()      - End of list?
#              Boolean      - moveNext() - Move the index along one
#              f_token/None - current()  - Return the current token
#              f_token/None - next()     - Return the next token (leave the index unchanged)
#              f_token/None - previous() - Return the previous token (leave the index unchanged)
#======================================================================== 
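The comment block above documents only the interface. The following is a hypothetical reconstruction of such an f_tokens container, based solely on the attributes and methods described, not on the actual koala implementation:

class f_tokens:
    def __init__(self):
        self.items = []   # ordered list of f_token objects
        self.index = -1   # current position; -1 means "before the first token"

    def add(self, token):
        # Append a token to the end of the list and return it.
        self.items.append(token)
        return token

    def reset(self):
        self.index = -1

    def BOF(self):
        # Beginning of list?
        return self.index <= 0

    def EOF(self):
        # End of list?
        return self.index >= len(self.items) - 1

    def moveNext(self):
        if self.EOF():
            return False
        self.index += 1
        return True

    def current(self):
        return self.items[self.index] if self.index >= 0 else None

    def next(self):
        # Return the next token without moving the index.
        return self.items[self.index + 1] if not self.EOF() else None

    def previous(self):
        # Return the previous token without moving the index.
        return self.items[self.index - 1] if self.index > 0 else None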
Example #2
Source File: feeding_functions.py    From lambda-packs with MIT License
def __call__(self):
    if self._num_epochs and self._epoch >= self._num_epochs:
      raise errors.OutOfRangeError(None, None,
                                   "Already emitted %s epochs." % self._epoch)
    list_dict = {}
    list_dict_size = 0
    while list_dict_size < self._batch_size:
      try:
        data_row = next(self._iterator)
      except StopIteration:
        self._epoch += 1
        self._iterator = self._generator_function()
        data_row = next(self._iterator)
      for index, key in enumerate(self._keys):
        if key not in data_row.keys():
          raise KeyError("key mismatch between dicts emitted by GenFun"
                         "Expected {} keys; got {}".format(
                             self._keys, data_row.keys()))
        list_dict.setdefault(self._col_placeholders[index],
                             list()).append(data_row[key])
        list_dict_size += 1
    feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
    return feed_dict 
Example #3
Source File: test_paddle.py    From LearnPaddle2 with Apache License 2.0
def infer(save_dirname=None):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(save_dirname, exe))
        test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)

        test_data = six.next(test_reader())
        test_feat = numpy.array(list(map(lambda x: x[0], test_data))).astype("float32")
        test_label = numpy.array(list(map(lambda x: x[1], test_data))).astype("float32")

        results = exe.run(inference_program,
                          feed={feed_target_names[0]: numpy.array(test_feat)},
                          fetch_list=fetch_targets)
        print("infer results: ", results[0])
        print("ground truth: ", test_label)


# Run train and infer. 
Example #4
Source File: feeding_functions.py    From lambda-packs with MIT License
def __init__(self,
               placeholders,
               generator,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None):
    first_sample = next(generator())
    if len(placeholders) != len(first_sample):
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(first_sample), len(placeholders)))
    self._keys = sorted(list(first_sample.keys()))
    self._col_placeholders = placeholders
    self._generator_function = generator
    self._iterator = generator()
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    random.seed(seed) 
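Examples #2 and #4 are the __call__ and __init__ of the same generator feed class: the constructor peeks at the first sample with next() to learn the key set, and __call__ keeps drawing rows, recreating the iterator whenever StopIteration marks the end of an epoch. A hedged sketch of a generator function that satisfies this contract (every yielded row is a dict with the same keys):

def sample_generator():
    # Each row must be a dict with a fixed key set, since the feed
    # function records sorted(first_sample.keys()) and checks every
    # subsequent row against it.
    for i in range(100):
        yield {"x": [float(i)], "y": [2.0 * i]}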
Example #5
Source File: feeding_functions.py    From lambda-packs with MIT License
def __init__(self,
               placeholders,
               ordered_dict_of_arrays,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None):
    if len(placeholders) != len(ordered_dict_of_arrays) + 1:
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(ordered_dict_of_arrays), len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._col_placeholders = placeholders[1:]
    self._ordered_dict_of_arrays = ordered_dict_of_arrays
    self._max = len(next(iter(ordered_dict_of_arrays.values())))
    for _, v in ordered_dict_of_arrays.items():
      if len(v) != self._max:
        raise ValueError("Array lengths must match.")
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    random.seed(seed)
    self._trav = random.randrange(self._max) if random_start else 0
    self._epoch_end = (self._trav - 1) % self._max 
Example #6
Source File: sequence_queueing_state_saver.py    From lambda-packs with MIT License
def next_key(self):
    """The key names of the next (in iteration) truncated unrolled examples.

    The format of the key is:

    ```python
    "%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
    ```

    if `sequence + 1 < sequence_count`, otherwise:

    ```python
    "STOP:%s" % original_key
    ```

    where `original_key` is the unique key read in by the prefetcher.

    Returns:
      A string vector of length `batch_size`, the keys.
    """
    return self._state_saver._received_next_key 
Example #7
Source File: external_workflow_handler.py    From botoflow with Apache License 2.0
def request_cancel_external_workflow_execution(self, external_workflow_execution):
        """Requests cancellation of another workflow.

        :param external_workflow_execution: details of target workflow to cancel
        :type external_workflow_execution: botoflow.workflow_execution.WorkflowExecution
        :return: cancel Future
        :rtype: botoflow.core.future.Future
        """
        self._decider._decisions.append(RequestCancelExternalWorkflowExecution(
            workflow_id=external_workflow_execution.workflow_id,
            run_id=external_workflow_execution.run_id))

        cancel_future = Future()
        context = AsyncTaskContext(False, get_async_context())
        cancel_future.context = context

        handler = self._handle_external_workflow_event(external_workflow_execution, cancel_future)
        six.next(handler)
        self._open_cancel_requests[external_workflow_execution] = {'handler': handler}
        return cancel_future 
Example #8
Source File: timer_handler.py    From botoflow with Apache License 2.0
def handle_execute_timer(self, seconds):
        decision_id = self._decider.get_next_id()
        timer_decision = StartTimer(decision_id, str(int(seconds)))
        self._decider._decisions.append(timer_decision)

        timer_future = Future()

        handler = self._handler_fsm(decision_id, timer_future)
        six.next(handler)  # arm
        self._open_timers[decision_id] = {'future': timer_future, 'handler': handler}

        @coroutine
        def wait_for_timer():
            yield timer_future

        return wait_for_timer() 
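Both botoflow handlers above follow the same idiom: build a generator-based state machine, then call six.next(handler) once to "arm" it, i.e. run it up to its first yield so it can receive events via send(). A standalone sketch of that priming pattern (illustrative only, not botoflow's actual FSM):

import six

results = []

def event_handler(decision_id, sink):
    # Runs up to the first `yield` when primed, then waits for an
    # event delivered via send().
    event = yield
    sink.append((decision_id, event))

handler = event_handler("timer-1", results)
six.next(handler)               # arm the generator
try:
    handler.send("TimerFired")  # deliver an event; the generator finishes here
except StopIteration:
    pass
print(results)                  # [('timer-1', 'TimerFired')]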
Example #9
Source File: sequence_queueing_state_saver.py    From auto-alt-text-lambda-api with MIT License
def next_key(self):
    """The key names of the next (in iteration) truncated unrolled examples.

    The format of the key is:

    ```python
    "%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
    ```

    if `sequence + 1 < sequence_count`, otherwise:

    ```python
    "STOP:%s" % original_key
    ```

    where `original_key` is the unique key read in by the prefetcher.

    Returns:
      A string vector of length `batch_size`, the keys.
    """
    return self._state_saver._received_next_key 
Example #10
Source File: decoding.py    From tensor2tensor with Apache License 2.0
def make_input_fn_from_generator(gen):
  """Use py_func to yield elements from the given generator."""
  first_ex = six.next(gen)
  flattened = contrib.framework().nest.flatten(first_ex)
  types = [t.dtype for t in flattened]
  shapes = [[None] * len(t.shape) for t in flattened]
  first_ex_list = [first_ex]

  def py_func():
    if first_ex_list:
      example = first_ex_list.pop()
    else:
      example = six.next(gen)
    return contrib.framework().nest.flatten(example)

  def input_fn():
    flat_example = tf.py_func(py_func, [], types)
    _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
    example = contrib.framework().nest.pack_sequence_as(first_ex, flat_example)
    return example

  return input_fn 
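A hypothetical usage sketch for make_input_fn_from_generator; it assumes a TF 1.x graph/session runtime, since the function relies on tf.py_func:

import numpy as np

def gen():
    while True:
        yield {"inputs": np.array([1, 2, 3]), "targets": np.array([0])}

input_fn = make_input_fn_from_generator(gen())
features = input_fn()  # dict of tensors; shapes are pinned from the first example
# with tf.Session() as sess:
#     print(sess.run(features))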
Example #11
Source File: wiki_lm.py    From tensor2tensor with Apache License 2.0
def mix_generators(generator_list):
  """Given python generators, generate from one, then from another, etc."""
  i = 0
  l = len(generator_list)
  stopiters_seen = 0
  while stopiters_seen <= l:
    try:
      yield six.next(generator_list[i % l])
      i += 1
      stopiters_seen = 0
    except StopIteration:
      i += 1
      stopiters_seen += 1


# File names and Google drive ids for the training/eval/test Wikipedia data. 
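A short usage sketch of the interleaving; the stopiters_seen counter tolerates generators of uneven length and exits only after l + 1 consecutive StopIterations (an exhausted iterator keeps raising StopIteration on every further next() call):

a = iter([1, 2, 3])
b = iter(["x", "y"])
print(list(mix_generators([a, b])))  # [1, 'x', 2, 'y', 3]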
Example #12
Source File: wiki_lm.py    From BERT with Apache License 2.0
def mix_generators(generator_list):
  """Given python generators, generate from one, then from another, etc."""
  i = 0
  l = len(generator_list)
  stopiters_seen = 0
  while stopiters_seen <= l:
    try:
      yield six.next(generator_list[i % l])
      i += 1
      stopiters_seen = 0
    except StopIteration:
      i += 1
      stopiters_seen += 1


# File names and Google drive ids for the training/eval/test Wikipedia data. 
Example #13
Source File: arrayiterator.py    From ngraph-python with Apache License 2.0
def __next__(self):
        """
        Returns a new minibatch of data with each call.

        Returns:
            dict: The next minibatch, containing both features and labels.
        """
        if self.index >= self.total_iterations:
            raise StopIteration
        self.index += 1

        total, batch_bufs = self.get_at_most(self.batch_size)
        while total < self.batch_size:
            bsz, next_batch_bufs = self.get_at_most(self.batch_size - total)
            batch_bufs = {k: np.concatenate([batch_bufs[k], next_batch_bufs[k]])
                          for k in batch_bufs}
            total += bsz
        batch_bufs['iteration'] = self.index
        return batch_bufs 
Example #14
Source File: clf_helpers.py    From ibeis with Apache License 2.0
def from_indicators(MultiClassLabels, indicator, index=None, task_name=None):
        import six
        labels = MultiClassLabels()
        n_samples = len(six.next(six.itervalues(indicator)))
        # if index is None:
        #     index = pd.Series(np.arange(n_samples), name='index')
        indicator_df = pd.DataFrame(indicator, index=index)
        assert np.all(indicator_df.sum(axis=1).values), (
            'states in the same task must be mutually exclusive')
        labels.indicator_df = indicator_df
        labels.class_names = indicator_df.columns.values
        labels.encoded_df = pd.DataFrame(
            indicator_df.values.argmax(axis=1),
            columns=[task_name],
            index=index,
        )
        labels.task_name = task_name
        labels.n_samples = n_samples
        labels.n_classes = len(labels.class_names)
        if labels.n_classes == 1:
            labels.n_classes = 2  # 1 column means binary case
        labels.classes_ = np.arange(labels.n_classes)
        labels.default_class_name = labels.class_names[1]
        return labels 
Example #15
Source File: decoding.py    From training_results_v0.5 with Apache License 2.0
def make_input_fn_from_generator(gen):
  """Use py_func to yield elements from the given generator."""
  first_ex = six.next(gen)
  flattened = tf.contrib.framework.nest.flatten(first_ex)
  types = [t.dtype for t in flattened]
  shapes = [[None] * len(t.shape) for t in flattened]
  first_ex_list = [first_ex]

  def py_func():
    if first_ex_list:
      example = first_ex_list.pop()
    else:
      example = six.next(gen)
    return tf.contrib.framework.nest.flatten(example)

  def input_fn():
    flat_example = tf.py_func(py_func, [], types)
    _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
    example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
    return example

  return input_fn 
Example #16
Source File: wiki_lm.py    From training_results_v0.5 with Apache License 2.0
def mix_generators(generator_list):
  """Given python generators, generate from one, then from another, etc."""
  i = 0
  l = len(generator_list)
  stopiters_seen = 0
  while stopiters_seen <= l:
    try:
      yield six.next(generator_list[i % l])
      i += 1
      stopiters_seen = 0
    except StopIteration:
      i += 1
      stopiters_seen += 1


# File names and Google drive ids for the training/eval/test Wikipedia data. 
Example #17
Source File: shakespeare_lstm.py    From training_results_v0.5 with Apache License 2.0
def lstm_model(seq_len=100, batch_size=None, stateful=True):
  """Language model: predict the next char given the current sequence."""
  source = tf.keras.Input(
      name='seed', shape=(seq_len,), batch_size=batch_size, dtype=tf.int32)

  embedding = tf.keras.layers.Embedding(
      input_dim=256, output_dim=EMBEDDING_DIM)(source)
  lstm_1 = tf.keras.layers.LSTM(
      EMBEDDING_DIM, stateful=stateful, return_sequences=True)(embedding)
  lstm_2 = tf.keras.layers.LSTM(
      EMBEDDING_DIM, stateful=stateful, return_sequences=True)(lstm_1)
  predicted_char = tf.keras.layers.TimeDistributed(
      tf.keras.layers.Dense(256, activation='softmax'))(lstm_2)

  model = tf.keras.Model(
      inputs=[source], outputs=[predicted_char],
  )
  model.compile(
      optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
      loss='sparse_categorical_crossentropy',
      metrics=['sparse_categorical_accuracy'])
  return model 
Example #18
Source File: test_devicecore.py    From python-devicecloud with Mozilla Public License 2.0
def test_get_groups(self):
        self.prepare_response("GET", "/ws/Group", EXAMPLE_GET_GROUPS)
        it = self.dc.devicecore.get_groups()

        grp = six.next(it)
        self.assertEqual(grp.is_root(), True)
        self.assertEqual(grp.get_id(), "11817")
        self.assertEqual(grp.get_name(), "7603_Digi")
        self.assertEqual(grp.get_description(), "7603_Digi root group")
        self.assertEqual(grp.get_path(), "/7603_Digi/")
        self.assertEqual(grp.get_parent_id(), "1")

        grp = six.next(it)
        self.assertEqual(grp.is_root(), False)
        self.assertEqual(grp.get_id(), "13542")
        self.assertEqual(grp.get_name(), "Demo")
        self.assertEqual(grp.get_description(), "")
        self.assertEqual(grp.get_path(), "/7603_Digi/Demo/")
        self.assertEqual(grp.get_parent_id(), "11817") 
Example #19
Source File: data_utils.py    From GraphicDesignPatternByPython with MIT License
def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Yields
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                inputs = self.queue.get(block=True).get()
                self.queue.task_done()
                if inputs is not None:
                    yield inputs
        except Exception as e:
            self.stop()
            six.reraise(*sys.exc_info()) 
Example #20
Source File: decoding.py    From BERT with Apache License 2.0
def make_input_fn_from_generator(gen):
  """Use py_func to yield elements from the given generator."""
  first_ex = six.next(gen)
  flattened = tf.contrib.framework.nest.flatten(first_ex)
  types = [t.dtype for t in flattened]
  shapes = [[None] * len(t.shape) for t in flattened]
  first_ex_list = [first_ex]

  def py_func():
    if first_ex_list:
      example = first_ex_list.pop()
    else:
      example = six.next(gen)
    return tf.contrib.framework.nest.flatten(example)

  def input_fn():
    flat_example = tf.py_func(py_func, [], types)
    _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
    example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
    return example

  return input_fn 
Example #21
Source File: decoding.py    From fine-lm with MIT License
def make_input_fn_from_generator(gen):
  """Use py_func to yield elements from the given generator."""
  first_ex = six.next(gen)
  flattened = tf.contrib.framework.nest.flatten(first_ex)
  types = [t.dtype for t in flattened]
  shapes = [[None] * len(t.shape) for t in flattened]
  first_ex_list = [first_ex]

  def py_func():
    if first_ex_list:
      example = first_ex_list.pop()
    else:
      example = six.next(gen)
    return tf.contrib.framework.nest.flatten(example)

  def input_fn():
    flat_example = tf.py_func(py_func, [], types)
    _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
    example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
    return example

  return input_fn 
Example #22
Source File: embeddings_to_torch.py    From video-caption-openNMT.pytorch with MIT License
def match_embeddings(vocab, emb, opt):
    dim = len(six.next(six.itervalues(emb)))
    filtered_embeddings = np.zeros((len(vocab), dim))
    count = {"match": 0, "miss": 0}
    for w, w_id in vocab.stoi.items():
        if w in emb:
            filtered_embeddings[w_id] = emb[w]
            count['match'] += 1
        else:
            if opt.verbose:
                print(u"not found:\t{}".format(w), file=sys.stderr)
            count['miss'] += 1

    return torch.Tensor(filtered_embeddings), count 
Example #23
Source File: batch_reader.py    From yolo_v2 with Apache License 2.0
def _TextGenerator(self, example_gen):
    """Generates article and abstract text from tf.Example."""
    while True:
      e = six.next(example_gen)
      try:
        article_text = self._GetExFeatureText(e, self._article_key)
        abstract_text = self._GetExFeatureText(e, self._abstract_key)
      except ValueError:
        tf.logging.error('Failed to get article or abstract from example')
        continue

      yield (article_text, abstract_text) 
Example #24
Source File: test_streams.py    From python-devicecloud with Mozilla Public License 2.0
def test_simple_read_several_pages(self):
        self.prepare_response("GET", "/ws/DataStream/test", GET_TEST_DATA_STREAM)

        # This test is a bit awkward as the pattern matching in httpretty is strange
        # and I couldn't get it to work in a nicer fashion
        test_stream = self.dc.streams.get_stream("test")
        generator = test_stream.read(page_size=2)
        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_FIVE_PAGED[0])

        point1 = six.next(generator)
        self.assertEqual(point1.get_id(), "75b0e84b-0968-11e4-9041-fa163e8f4b62")
        point2 = six.next(generator)
        self.assertEqual(point2.get_id(), "75d56063-0968-11e4-9041-fa163e8f4b62")

        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_FIVE_PAGED[1])

        point3 = six.next(generator)
        self.assertEqual(point3.get_id(), "75f8901f-0968-11e4-ab44-fa163e7ebc6b")
        point4 = six.next(generator)
        self.assertEqual(point4.get_id(), "761eecbb-0968-11e4-9041-fa163e8f4b62")

        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_FIVE_PAGED[2])

        point5 = six.next(generator)
        self.assertEqual(point5.get_id(), "76459cf1-0968-11e4-98e9-fa163ecf1de4")
        self.assertRaises(StopIteration, six.next, generator) 
Example #25
Source File: test_streams.py    From python-devicecloud with Mozilla Public License 2.0
def test_rollup_interval_invalid(self):
        self.prepare_response("GET", "/ws/DataStream/test", GET_TEST_DATA_STREAM)
        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_ONE)
        test_stream = self.dc.streams.get_stream("test")
        self.assertRaises(ValueError, six.next, test_stream.read(rollup_interval='invalid')) 
Example #26
Source File: test_streams.py    From python-devicecloud with Mozilla Public License 2.0
def test_rollup_method_invalid(self):
        self.prepare_response("GET", "/ws/DataStream/test", GET_TEST_DATA_STREAM)
        test_stream = self.dc.streams.get_stream("test")
        self.assertRaises(ValueError, six.next, test_stream.read(rollup_method='invalid')) 
Example #27
Source File: test_core.py    From python-devicecloud with Mozilla Public License 2.0
def test_iter_json_pages_paged_noparams(self):
        it = self.dc.get_connection().iter_json_pages("/test/path", page_size=1)
        self.prepare_response("GET", "/test/path", TEST_PAGED_RESPONSE_PAGE1)
        self.assertEqual(six.next(it)["id"], 1)
        self.prepare_response("GET", "/test/path", TEST_PAGED_RESPONSE_PAGE2)
        self.assertEqual(six.next(it)["id"], 2)
        self.assertDictEqual(self._get_last_request_params(), {
            "size": "1",
            "start": "1"
        }) 
Example #28
Source File: test_filedata.py    From python-devicecloud with Mozilla Public License 2.0
def test_walk(self):
        self.prepare_response("GET", "/ws/FileData", GET_HOME_RESULT)
        gen = self.dc.filedata.walk()
        dirpath, dirnames, filenames = six.next(gen)
        self.assertEqual(dirpath, "~/")
        self.assertEqual(len(dirnames), 3)
        self.assertEqual([x.get_full_path() for x in dirnames], [
            '/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/00000000-00000000-0004F3FF-FF027D8C',
            '/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/00000000-00000000-080027FF-FFB1A2C2',
            '/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/test_dir'])
        self.assertEqual(filenames, [])

        # Dir 1
        self.prepare_response("GET", "/ws/FileData", GET_DIR1_RESULT)
        dirpath, dirnames, filenames = six.next(gen)
        self.assertEqual(dirpath,
                         "/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/00000000-00000000-0004F3FF-FF027D8C")
        self.assertEqual(dirnames, [])
        self.assertEqual(filenames, [])

        # Dir 2
        self.prepare_response("GET", "/ws/FileData", GET_DIR2_RESULT)
        dirpath, dirnames, filenames = six.next(gen)
        self.assertEqual(dirpath,
                         "/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/00000000-00000000-080027FF-FFB1A2C2")
        self.assertEqual(dirnames, [])
        self.assertEqual(filenames, [])

        # Dir 3
        self.prepare_response("GET", "/ws/FileData", GET_DIR3_RESULT)
        dirpath, dirnames, filenames = six.next(gen)
        self.assertEqual(dirpath, "/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/test_dir")
        self.assertEqual(dirnames, [])
        self.assertEqual(len(filenames), 1)
        f = filenames[0]
        self.assertEqual(f.get_full_path(),
                         "/db/CUS0000033_Spectrum_Design_Solutions__Paul_Osborne/test_dir/test_file.txt") 
Example #29
Source File: test_date_chunker.py    From arctic with GNU Lesser General Public License v2.1
def test_to_chunks_exceptions():
    df = DataFrame(data={'data': [1, 2, 3]})
    c = DateChunker()

    with pytest.raises(Exception) as e:
        six.next(c.to_chunks(df, 'D'))
    assert('datetime indexed' in str(e.value))

    df.columns = ['date']
    with pytest.raises(Exception) as e:
        six.next(c.to_chunks(df, 'ZSDFG'))
    assert('Unknown freqstr' in str(e.value) or 'Invalid frequency' in str(e.value)) 
Example #30
Source File: integration_test.py    From pulsar with Apache License 2.0
def _run_direct(self, app_conf, **kwds):
        with test_pulsar_app({}, app_conf, {}) as app:
            options = Bunch(job_manager=next(itervalues(app.app.managers)), file_cache=app.app.file_cache, **kwds)
            self._update_options_for_app(options, app.app, **kwds)
            run(options)