Python reader.ptb_iterator() Examples

The following are 7 code examples of reader.ptb_iterator(), the minibatch iterator from TensorFlow's Penn Treebank (PTB) language-model tutorial. Each example comes from an open-source project; the Source File line above each one names the file, project, and license.
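All seven examples drive the same kind of training loop, so it helps to know what the iterator yields: a sequence of (x, y) pairs of int32 arrays with shape [batch_size, num_steps], where y is x shifted one token ahead. A minimal sketch, close to (but not a verbatim copy of) the tutorial's implementation:

import numpy as np

def ptb_iterator(raw_data, batch_size, num_steps):
  """Yield (inputs, targets) minibatches over a flat list of token ids."""
  raw_data = np.array(raw_data, dtype=np.int32)
  batch_len = len(raw_data) // batch_size
  # cut the corpus into batch_size parallel streams of equal length
  data = np.reshape(raw_data[:batch_size * batch_len], [batch_size, batch_len])
  epoch_size = (batch_len - 1) // num_steps  # "- 1" leaves room for the shifted targets
  for i in range(epoch_size):
    x = data[:, i * num_steps:(i + 1) * num_steps]
    y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]  # x shifted by one
    yield (x, y)

This layout also explains the epoch_size formula repeated in every example below: ((len(data) // batch_size) - 1) // num_steps is exactly the number of (x, y) pairs yielded per epoch.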
Example #1
Source File: ptb.py    From tensor-fsmn with MIT License
def _run_epoch(self, session, data, eval_op, verbose=False):
        epoch_size = ((len(data) // self._batch_size) - 1) // self._num_steps
        start_time = time.time()
        costs = 0.0
        iters = 0

        for step, (x, y) in enumerate(reader.ptb_iterator(data, self._batch_size, self._num_steps)):
            fetches, feed_dict = self._one_loop_setup(eval_op)
            feed_dict[self._input_data] = x
            feed_dict[self._targets] = y

            res = session.run(fetches, feed_dict)
            self.train_writer.add_summary(res[2], step / 13)
            cost = res[0]

            costs += cost
            iters += self._num_steps

            if verbose and step % (epoch_size // 10) == 10:
                # iters already counts num_steps per minibatch, so the number
                # of words processed is iters * batch_size
                sys.stdout.write("%.3f perplexity: %.3f speed: %.0f wps\n" %
                    (step * 1.0 / epoch_size, np.exp(costs / iters),
                    iters * self._batch_size / (time.time() - start_time)))
                sys.stdout.flush()

        return np.exp(costs / iters) 
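Every run_epoch in these examples reports perplexity as np.exp(costs / iters): costs accumulates the per-step cross-entropy, iters counts the time steps seen, so the ratio approximates the average negative log-likelihood per word, and its exponential is the perplexity. A toy sanity check of the formula:

import numpy as np

# if a model assigned every word probability 1/100, the average
# cross-entropy would be ln(100) nats and the perplexity exactly 100
avg_nll = np.log(100.0)
print(np.exp(avg_nll))  # -> 100.0 (up to float rounding)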
Example #2
Source File: ptb.py    From tensor-fsmn with MIT License
def predict(self, session, data, word_to_id):
        def _get_word_fromid(word_to_id, search_id):
            for word, wid in word_to_id.items():
                if wid == search_id:
                    return word

        for step, (x, y) in enumerate(reader.ptb_iterator(data, self._batch_size, self._num_steps)):
            fetches, feed_dict = self._one_loop_setup(self._logits)
            feed_dict[self._input_data] = x
            feed_dict[self._targets] = y

            res = session.run(fetches, feed_dict)
            label = res[1]
            label = np.argmax(label, 1)
            y = np.reshape(y, (self._batch_size * self._num_steps))
            for pre, real in zip(label, y):
                sys.stdout.write("Predict %s : Real %s\n" % (_get_word_fromid(word_to_id, pre), _get_word_fromid(word_to_id, real))) 
Example #3
Source File: train.py    From bit-rnn with Apache License 2.0
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(
            reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps

        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))

    return np.exp(costs / iters) 
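For a sense of scale, the PTB training split is commonly cited as about 929k tokens. Plugging the tutorial's "medium" configuration into the epoch_size formula above:

tokens = 929589                 # approximate PTB training-set size
batch_size, num_steps = 20, 35  # the tutorial's medium settings
epoch_size = ((tokens // batch_size) - 1) // num_steps
print(epoch_size)               # -> 1327 minibatches per epoch

so the verbose branch prints progress roughly every epoch_size // 10 = 132 steps.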
Example #4
Source File: ptb_word_lm.py    From rnn_text_writer with MIT License
def run_epoch(session, model, data, is_train=False, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(model.initial_state)

  for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
    if is_train:
      fetches = [model.cost, model.final_state, model.train_op]
    else:
      fetches = [model.cost, model.final_state]
    feed_dict = {}
    feed_dict[model.input_data] = x
    feed_dict[model.targets] = y
    for layer_num, (c, h) in enumerate(model.initial_state):
      feed_dict[c] = state[layer_num].c
      feed_dict[h] = state[layer_num].h

    if is_train:
      cost, state, _ = session.run(fetches, feed_dict)
    else:
      cost, state = session.run(fetches, feed_dict)

    costs += cost
    iters += model.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * model.batch_size / (time.time() - start_time)))

  return np.exp(costs / iters) 
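Compared with Example #3, this version carries the recurrent state layer by layer because model.initial_state is assumed to be a tuple of LSTMStateTuples rather than a single tensor. That inner loop is a reusable pattern; a small sketch under the same assumption:

def feed_lstm_state(feed_dict, initial_state, numpy_state):
  """Copy the previous minibatch's final state into the c/h placeholders."""
  for (c, h), layer_state in zip(initial_state, numpy_state):
    feed_dict[c] = layer_state.c
    feed_dict[h] = layer_state.h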
Example #5
Source File: lstm.py    From dlbench with MIT License
def run_epoch(session, m, data, eval_op, ITERS, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(m.initial_state)
  step = 0
  for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                    m.num_steps)):
    cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                 {m.input_data: x,
                                  m.targets: y,
                                  m.initial_state: state})
    costs += cost
    iters += m.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * m.batch_size / (time.time() - start_time)))
   
    # run only a few iterations for profiling; remove this break for full training
    if step > ITERS - 1:
      break

  print("Time for %d iterations %.4f seconds" %
            (ITERS, time.time() - start_time))
   
  return np.exp(costs / iters) 
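The extra ITERS argument turns this variant into a profiling harness: the loop breaks after ITERS minibatches, so the returned perplexity covers only those iterations. A hypothetical call (the m.train_op name is assumed) timing 100 training steps:

train_perplexity = run_epoch(session, m, train_data, m.train_op,
                             ITERS=100, verbose=True)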
Example #6
Source File: lstm.py    From dlbench with MIT License
def run_epoch(session, m, data, eval_op, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(m.initial_state)
  step = 0
  for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                    m.num_steps)):
    cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                 {m.input_data: x,
                                  m.targets: y,
                                  m.initial_state: state})
    costs += cost
    iters += m.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * m.batch_size / (time.time() - start_time)))

  print("Time for one epoch, %d iters: %.4f seconds" %
            (step+1, time.time() - start_time))
  average_batch_time = (time.time() - start_time)/(step+1)
  print("Average time per minibatch in this epoch: %.4f seconds" % average_batch_time)
   
  return np.exp(costs / iters), average_batch_time 
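Unlike the other variants, this one returns a (perplexity, average_batch_time) pair, so callers must unpack both values (again with a hypothetical m.train_op):

train_perplexity, avg_batch_time = run_epoch(session, m, train_data,
                                             m.train_op, verbose=True)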
Example #7
Source File: ptb-lm.py    From pytorch-language-model with MIT License
def run_epoch(model, data, is_train=False, lr=1.0):
  """Runs the model on the given data."""
  if is_train:
    model.train()
  else:
    model.eval()
  epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
  start_time = time.time()
  hidden = model.init_hidden()
  costs = 0.0
  iters = 0
  for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
    inputs = Variable(torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous()).cuda()
    model.zero_grad()
    hidden = repackage_hidden(hidden)
    outputs, hidden = model(inputs, hidden)
    targets = Variable(torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous()).cuda()
    tt = torch.squeeze(targets.view(-1, model.batch_size * model.num_steps))

    loss = criterion(outputs.view(-1, model.vocab_size), tt)
    costs += loss.data[0] * model.num_steps
    iters += model.num_steps

    if is_train:
      loss.backward()
      torch.nn.utils.clip_grad_norm(model.parameters(), 0.25)
      for p in model.parameters():
        p.data.add_(-lr, p.grad.data)
      if step % (epoch_size // 10) == 10:
        print("{} perplexity: {:8.2f} speed: {} wps".format(step * 1.0 / epoch_size, np.exp(costs / iters),
                                                       iters * model.batch_size / (time.time() - start_time)))
  return np.exp(costs / iters)
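This PyTorch port (written against the pre-0.4 Variable API, hence loss.data[0] and the manual p.data.add_ update) relies on two names defined elsewhere in the file: criterion, presumably a cross-entropy loss, and repackage_hidden, the standard helper from PyTorch's word_language_model example that detaches the hidden state so backpropagation truncates at the minibatch boundary. A minimal Variable-era sketch of that helper:

from torch.autograd import Variable

def repackage_hidden(h):
    """Rewrap hidden state in fresh Variables, cutting the backprop graph."""
    if isinstance(h, Variable):
        return Variable(h.data)
    return tuple(repackage_hidden(v) for v in h)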