Python tensorflow.python.ops.rnn.rnn() Examples

The following are 16 code examples of tensorflow.python.ops.rnn.rnn(), drawn from open-source projects; the originating source file, project, and license are noted above each example. Note that rnn.rnn() is the legacy graph-unrolling (static) RNN API from TensorFlow's pre-1.0 releases: it takes a Python list of per-time-step tensors rather than a single 3D tensor, was later renamed tf.nn.static_rnn, and was removed entirely in TensorFlow 2.x.
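All of the snippets below are excerpted from larger files; they assume a TF 0.x-era preamble along the following lines (a sketch, since exact module paths varied slightly between releases):

import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope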
Example #1
Source File: test_tf_qrnn_work.py    From tensorflow_end2end_speech_recognition with MIT License
def baseline_forward(self, X, size, n_class):
        shape = X.get_shape()
        # batch_size x sentence_length x word_length
        # -> sentence_length x batch_size x word_length (time-major)
        _X = tf.transpose(X, [1, 0, 2])
        _X = tf.reshape(_X, [-1, int(shape[2])])  # (sentence_length * batch_size) x word_length
        seq = tf.split(0, int(shape[1]), _X)  # list of sentence_length tensors, each batch_size x word_length

        with tf.name_scope("LSTM"):
            lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=1.0)
            outputs, states = rnn.rnn(lstm_cell, seq, dtype=tf.float32)

        with tf.name_scope("LSTM-Classifier"):
            W = tf.Variable(tf.random_normal([size, n_class]), name="W")
            b = tf.Variable(tf.random_normal([n_class]), name="b")
            output = tf.matmul(outputs[-1], W) + b

        return output 
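A minimal sketch of driving baseline_forward (the placeholder shape and the sizes are assumptions, and `model` stands in for whatever object defines the method; the static shape must be fully known because the code calls int(shape[1]) and int(shape[2])):

X = tf.placeholder(tf.float32, [32, 20, 50])  # batch_size x sentence_length x word_length
logits = model.baseline_forward(X, size=128, n_class=10)  # hypothetical `model` instance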
Example #2
Source File: seq2seq.py    From deep-text-corrector with Apache License 2.0
def basic_rnn_seq2seq(
        encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None):
    """Basic RNN sequence-to-sequence model.

    This model first runs an RNN to encode encoder_inputs into a state vector,
    then runs decoder, initialized with the last encoder state, on decoder_inputs.
    Encoder and decoder use the same RNN cell type, but don't share parameters.

    Args:
      encoder_inputs: A list of 2D Tensors [batch_size x input_size].
      decoder_inputs: A list of 2D Tensors [batch_size x input_size].
      cell: rnn_cell.RNNCell defining the cell function and size.
      dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
      scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

    Returns:
      A tuple of the form (outputs, state), where:
        outputs: A list of the same length as decoder_inputs of 2D Tensors with
          shape [batch_size x output_size] containing the generated outputs.
        state: The state of each decoder cell in the final time-step.
          It is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
        _, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)
        return rnn_decoder(decoder_inputs, enc_state, cell) 
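Note that both encoder_inputs and decoder_inputs are Python lists of per-time-step 2D tensors, not single 3D tensors. A minimal sketch of wiring this up (all sizes are assumed):

cell = rnn_cell.BasicLSTMCell(64)
encoder_inputs = [tf.placeholder(tf.float32, [None, 8]) for _ in range(5)]
decoder_inputs = [tf.placeholder(tf.float32, [None, 8]) for _ in range(5)]
outputs, state = basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)
# outputs: a list of 5 tensors of shape [batch_size, 64]; state: the final decoder state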
Example #3
Source File: seq2seq.py    From deep_image_model with Apache License 2.0
def basic_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None):
  """Basic RNN sequence-to-sequence model.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  then runs decoder, initialized with the last encoder state, on decoder_inputs.
  Encoder and decoder use the same RNN cell type, but don't share parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
    _, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, enc_state, cell) 
Example #4
Source File: seq2seq.py    From ecm with Apache License 2.0
def basic_rnn_seq2seq(
        encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None):
    """Basic RNN sequence-to-sequence model.

    This model first runs an RNN to encode encoder_inputs into a state vector,
    then runs decoder, initialized with the last encoder state, on decoder_inputs.
    Encoder and decoder use the same RNN cell type, but don't share parameters.

    Args:
        encoder_inputs: A list of 2D Tensors [batch_size x input_size].
        decoder_inputs: A list of 2D Tensors [batch_size x input_size].
        cell: rnn_cell.RNNCell defining the cell function and size.
        dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
        scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

    Returns:
        A tuple of the form (outputs, state), where:
            outputs: A list of the same length as decoder_inputs of 2D Tensors with
                shape [batch_size x output_size] containing the generated outputs.
            state: The state of each decoder cell in the final time-step.
                It is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
        _, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)
        return rnn_decoder(decoder_inputs, enc_state, cell) 
Example #5
Source File: tf_seq2seq_model.py    From Conditional-SeqGAN-Tensorflow with MIT License
def basic_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None):
  """Basic RNN sequence-to-sequence model.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  then runs decoder, initialized with the last encoder state, on decoder_inputs.
  Encoder and decoder use the same RNN cell type, but don't share parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
    _, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, enc_state, cell) 
Example #6
Source File: seq2seq.py    From deep-text-corrector with Apache License 2.0
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                     loop_function=None, dtype=dtypes.float32, scope=None):
    """RNN sequence-to-sequence model with tied encoder and decoder parameters.

    This model first runs an RNN to encode encoder_inputs into a state vector, and
    then runs decoder, initialized with the last encoder state, on decoder_inputs.
    Encoder and decoder use the same RNN cell and share parameters.

    Args:
      encoder_inputs: A list of 2D Tensors [batch_size x input_size].
      decoder_inputs: A list of 2D Tensors [batch_size x input_size].
      cell: rnn_cell.RNNCell defining the cell function and size.
      loop_function: If not None, this function will be applied to i-th output
        in order to generate i+1-th input, and decoder_inputs will be ignored,
        except for the first element ("GO" symbol), see rnn_decoder for details.
      dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
      scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

    Returns:
      A tuple of the form (outputs, state), where:
        outputs: A list of the same length as decoder_inputs of 2D Tensors with
          shape [batch_size x output_size] containing the generated outputs.
        state: The state of each decoder cell in each time-step. This is a list
          with length len(decoder_inputs) -- one item for each time-step.
          It is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
        scope = scope or "tied_rnn_seq2seq"
        _, enc_state = rnn.rnn(
            cell, encoder_inputs, dtype=dtype, scope=scope)
        variable_scope.get_variable_scope().reuse_variables()
        return rnn_decoder(decoder_inputs, enc_state, cell,
                           loop_function=loop_function, scope=scope) 
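The tying comes from the reuse_variables() call between the two passes: the decoder's get_variable lookups resolve to the variables the encoder already created, so the graph holds a single copy of the cell weights. A minimal sketch (sizes are assumed):

cell = rnn_cell.GRUCell(32)
encoder_inputs = [tf.placeholder(tf.float32, [None, 16]) for _ in range(4)]
decoder_inputs = [tf.placeholder(tf.float32, [None, 16]) for _ in range(4)]
outputs, state = tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)
# tf.trainable_variables() now holds one set of GRU weights, not two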
Example #7
Source File: seq2seq.py    From deep_image_model with Apache License 2.0
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                     loop_function=None, dtype=dtypes.float32, scope=None):
  """RNN sequence-to-sequence model with tied encoder and decoder parameters.

  This model first runs an RNN to encode encoder_inputs into a state vector, and
  then runs decoder, initialized with the last encoder state, on decoder_inputs.
  Encoder and decoder use the same RNN cell and share parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    loop_function: If not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol), see rnn_decoder for details.
    dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
    scope = scope or "tied_rnn_seq2seq"
    _, enc_state = rnn.rnn(
        cell, encoder_inputs, dtype=dtype, scope=scope)
    variable_scope.get_variable_scope().reuse_variables()
    return rnn_decoder(decoder_inputs, enc_state, cell,
                       loop_function=loop_function, scope=scope) 
Example #8
Source File: fused_rnn_cell.py    From deep_image_model with Apache License 2.0
def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
        inputs = array_ops.pack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unpack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unpack(inputs)
      outputs, state = rnn.rnn(self._cell,
                               inputs,
                               initial_state=initial_state,
                               dtype=dtype,
                               sequence_length=sequence_length,
                               scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.pack(outputs)

    return outputs, state 
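The wrapper thus accepts either a time-major list of [batch_size, input_size] tensors or a single packed [time, batch_size, input_size] tensor, converts the input to whatever the chosen rnn flavor expects, and converts the output back to match. The round trip rests on the legacy pack/unpack pair (renamed stack/unstack in later releases); a sketch with assumed sizes:

steps = [tf.placeholder(tf.float32, [None, 10]) for _ in range(7)]
packed = array_ops.pack(steps)       # one [7, batch_size, 10] time-major tensor
unpacked = array_ops.unpack(packed)  # back to a list of 7 [batch_size, 10] tensors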
Example #9
Source File: seq2seq.py    From ecm with Apache License 2.0
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                     loop_function=None, dtype=dtypes.float32, scope=None):
    """RNN sequence-to-sequence model with tied encoder and decoder parameters.

    This model first runs an RNN to encode encoder_inputs into a state vector, and
    then runs decoder, initialized with the last encoder state, on decoder_inputs.
    Encoder and decoder use the same RNN cell and share parameters.

    Args:
        encoder_inputs: A list of 2D Tensors [batch_size x input_size].
        decoder_inputs: A list of 2D Tensors [batch_size x input_size].
        cell: rnn_cell.RNNCell defining the cell function and size.
        loop_function: If not None, this function will be applied to i-th output
            in order to generate i+1-th input, and decoder_inputs will be ignored,
            except for the first element ("GO" symbol), see rnn_decoder for details.
        dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
        scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

    Returns:
        A tuple of the form (outputs, state), where:
            outputs: A list of the same length as decoder_inputs of 2D Tensors with
                shape [batch_size x output_size] containing the generated outputs.
            state: The state of each decoder cell in each time-step. This is a list
                with length len(decoder_inputs) -- one item for each time-step.
                It is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
        scope = scope or "tied_rnn_seq2seq"
        _, enc_state = rnn.rnn(
            cell, encoder_inputs, dtype=dtype, scope=scope)
        variable_scope.get_variable_scope().reuse_variables()
        return rnn_decoder(decoder_inputs, enc_state, cell,
                           loop_function=loop_function, scope=scope) 
Example #10
Source File: tf_seq2seq_model.py    From Conditional-SeqGAN-Tensorflow with MIT License
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                     loop_function=None, dtype=dtypes.float32, scope=None):
  """RNN sequence-to-sequence model with tied encoder and decoder parameters.

  This model first runs an RNN to encode encoder_inputs into a state vector, and
  then runs decoder, initialized with the last encoder state, on decoder_inputs.
  Encoder and decoder use the same RNN cell and share parameters.

  Args:
    encoder_inputs: A list of 2D Tensors [batch_size x input_size].
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    loop_function: If not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol), see rnn_decoder for details.
    dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
    scope = scope or "tied_rnn_seq2seq"
    _, enc_state = rnn.rnn(
        cell, encoder_inputs, dtype=dtype, scope=scope)
    variable_scope.get_variable_scope().reuse_variables()
    return rnn_decoder(decoder_inputs, enc_state, cell,
                       loop_function=loop_function, scope=scope) 
Example #11
Source File: predict.py    From image-classification-rnn with Apache License 2.0
def rnn_model(x, weights, biases):
	"""Build an RNN model for images"""
	x = tf.transpose(x, [1, 0, 2])    # to time-major: n_steps x batch_size x n_input
	x = tf.reshape(x, [-1, n_input])  # (n_steps * batch_size) x n_input
	x = tf.split(0, n_steps, x)       # list of n_steps tensors, each batch_size x n_input

	lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
	outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
	return tf.matmul(outputs[-1], weights) + biases 
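For reference, the same graph under the later TF 1.x names (a sketch; tf.nn.static_rnn is the renamed rnn.rnn):

lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)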
Example #12
Source File: predict.py    From image-classification-rnn with Apache License 2.0
def predict():
	"""Predict unseen images"""
	"""Step 0: load data and trained model"""
	mnist = input_data.read_data_sets("./data/", one_hot=True)
	checkpoint_dir = sys.argv[1]

	"""Step 1: build the rnn model"""
	x = tf.placeholder("float", [None, n_steps, n_input])
	y = tf.placeholder("float", [None, n_classes])

	weights = tf.Variable(tf.random_normal([n_hidden, n_classes]), name='weights')
	biases = tf.Variable(tf.random_normal([n_classes]), name='biases')

	pred = rnn_model(x, weights, biases)
	correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
	accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

	"""Step 2: predict new images with the trained model"""
	with tf.Session() as sess:
		sess.run(tf.initialize_all_variables())
		"""Step 2.0: load the trained model"""
		# NOTE: assumes checkpoint_dir ends with a path separator, e.g. "trained_model_123/"
		checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir + 'checkpoints')
		print('Loading the trained model: {}'.format(checkpoint_file))

		saver = tf.train.Saver()
		saver.restore(sess, checkpoint_file)

		"""Step 2.1: predict new data"""
		test_len = 500
		test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
		test_label = mnist.test.labels[:test_len]
		print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label})) 
Example #13
Source File: train.py    From image-classification-rnn with Apache License 2.0
def rnn_model(x, weights, biases):
	"""RNN (LSTM or GRU) model for images"""
	x = tf.transpose(x, [1, 0, 2])    # to time-major: n_steps x batch_size x n_input
	x = tf.reshape(x, [-1, n_input])  # (n_steps * batch_size) x n_input
	x = tf.split(0, n_steps, x)       # list of n_steps tensors, each batch_size x n_input

	lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
	outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
	return tf.matmul(outputs[-1], weights) + biases 
Example #14
Source File: train.py    From image-classification-rnn with Apache License 2.0
def train():
	"""Train an image classifier"""
	"""Step 0: load image data and training parameters"""
	mnist = input_data.read_data_sets("./data/", one_hot=True)
	parameter_file = sys.argv[1]
	params = json.loads(open(parameter_file).read())

	"""Step 1: build a rnn model for image"""
	x = tf.placeholder("float", [None, n_steps, n_input])
	y = tf.placeholder("float", [None, n_classes])

	weights = tf.Variable(tf.random_normal([n_hidden, n_classes]), name='weights')
	biases = tf.Variable(tf.random_normal([n_classes]), name='biases')

	pred = rnn_model(x, weights, biases)
	cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
	optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate']).minimize(cost)

	correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
	accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

	"""Step 2: train the image classification model"""
	with tf.Session() as sess:
		sess.run(tf.initialize_all_variables())
		step = 1

		"""Step 2.0: create a directory for saving model files"""
		timestamp = str(int(time.time()))
		out_dir = os.path.abspath(os.path.join(os.path.curdir, "trained_model_" + timestamp))
		checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
		checkpoint_prefix = os.path.join(checkpoint_dir, "model")
		if not os.path.exists(checkpoint_dir):
			os.makedirs(checkpoint_dir)
		saver = tf.train.Saver(tf.all_variables())

		"""Step 2.1: train the image classifier batch by batch"""
		while step * params['batch_size'] < params['training_iters']:
			batch_x, batch_y = mnist.train.next_batch(params['batch_size'])
			# Reshape data to get 28 seq of 28 elements
			batch_x = batch_x.reshape((params['batch_size'], n_steps, n_input))
			sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

			"""Step 2.2: save the model"""
			if step % params['display_step'] == 0:
				path = saver.save(sess, checkpoint_prefix, global_step=step)
				acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
				loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
				print('Iter: {}, Loss: {:.6f}, Accuracy: {:.6f}'.format(step * params['batch_size'], loss, acc))
			step += 1
		print("The training is done")

		"""Step 3: test the model"""
		test_len = 128
		test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
		test_label = mnist.test.labels[:test_len]
		print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label})) 
Example #15
Source File: dnn_lstm.py    From Query_Classfication_LSTM with Apache License 2.0
def Forward(self, sess):
        lstm = tf.nn.rnn_cell.BasicLSTMCell(200, forget_bias=1.0)  # LSTM size
        #lstm = tf.nn.rnn_cell.GRUCell(10)
        state = tf.zeros([1, 200])  # initial state; unused, since rnn.rnn creates its own zero state
        num_steps = 20  # not used directly; the sentence length serves as the time-step count
        x_in_batch = tf.transpose(self.x_in, [1, 0, 2])  # to time-major: 20 x 1 x 200
        x_in = tf.reshape(x_in_batch, [-1, 200])  # flatten to 20 x 200
        x_in = tf.split(0, 20, x_in)  # a list of 20 tensors, each 1 x 200

        if self.i == 0:
            with tf.variable_scope('output'):
                output_lstm, state = rnn.rnn(lstm, x_in, dtype=tf.float32)
                #output_lstm, state = lstm(x_in, state)  # 200 x 1
        else:
            with tf.variable_scope('output', reuse=True):
                output_lstm, state = rnn.rnn(lstm, x_in, dtype=tf.float32)
                #output_lstm, state = lstm(x_in, state)
        self.i += 1

        output_lstm = output_lstm[-1]  # the last element of the output list

        lin_h = tf.matmul(output_lstm, self.hiddenLayer.W) + self.hiddenLayer.b
        # output_lstm = 1 x 200, hiddenLayer.W = 200 x 200

        reg_h = tf.reduce_sum(tf.gather(self.reg_lookup_table, self.reg_x), 0)  # Num x 200
        print("reg_h is")
        print(reg_h)
        h = self.activation(lin_h + tf.cast(reg_h, tf.float32))  # 1 x 200

        lin_output_pre = tf.matmul(h, self.outputLayer.W) + self.outputLayer.b
        lin_output = tf.nn.dropout(lin_output_pre, keep_prob=0.6)

        # h = 1 x 200, outputLayer.W = 200 x 63, lin_output = 1 x 63
        # skip_layer_re.W: 19156 x 63
        reg_output = tf.reduce_sum(tf.gather(self.skip_layer_re.W, self.reg_x), 0) + self.skip_layer_re.b
        print(reg_output)

        # x_in[-1] = 1 x 200, skip_layer_ae.W = 200 x 63
        ae_output = tf.matmul(x_in[-1], self.skip_layer_ae.W) + self.skip_layer_ae.b  # the last time step is the skip-layer input
        ae_output = tf.nn.dropout(ae_output, keep_prob=0.5)

        output = tf.nn.softmax(lin_output + ae_output + reg_output)  # 1 x 63

        return output 
Example #16
Source File: my_seq2seq.py    From Neural_Conversation_Models with Apache License 2.0
def embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                          num_encoder_symbols, num_decoder_symbols,
                          embedding_size, output_projection=None,
                          feed_previous=False, dtype=dtypes.float32,
                          scope=None, beam_search=True, beam_size=10):
  """Embedding RNN sequence-to-sequence model.

  This model first embeds encoder_inputs by a newly created embedding (of shape
  [num_encoder_symbols x input_size]). Then it runs an RNN to encode
  embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
  by another newly created embedding (of shape [num_decoder_symbols x
  input_size]). Then it runs RNN decoder, initialized with the last
  encoder state, on embedded decoder_inputs.

  Args:
    encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
    decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    num_encoder_symbols: Integer; number of symbols on the encoder side.
    num_decoder_symbols: Integer; number of symbols on the decoder side.
    embedding_size: Integer, the length of the embedding vector for each symbol.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [output_size x num_decoder_symbols] and B has
      shape [num_decoder_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and added B.
    feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
      of decoder_inputs will be used (the "GO" symbol), and all other decoder
      inputs will be taken from previous outputs (as in embedding_rnn_decoder).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_rnn_seq2seq".
    beam_search: Boolean; if True, decode with beam search instead of greedy
      decoding (see embedding_rnn_decoder).
    beam_size: Integer; the number of hypotheses to keep per step when
      beam_search is True.

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x num_decoder_symbols] containing the generated
        outputs.
      state: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq"):
    # Encoder.
    encoder_cell = rnn_cell.EmbeddingWrapper(
        cell, embedding_classes=num_encoder_symbols,
        embedding_size=embedding_size)
    _, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)

    # Decoder.
    if output_projection is None:
      cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)

    return embedding_rnn_decoder(
        decoder_inputs, encoder_state, cell, num_decoder_symbols,
        embedding_size, output_projection=output_projection,
        feed_previous=feed_previous, beam_search=beam_search, beam_size=beam_size)
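A minimal sketch of calling this variant with beam-search decoding enabled at inference time (all sizes and vocabulary counts are assumptions):

cell = rnn_cell.GRUCell(128)
encoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(10)]
decoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(12)]
outputs, state = embedding_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell,
    num_encoder_symbols=20000, num_decoder_symbols=20000,
    embedding_size=128, feed_previous=True,
    beam_search=True, beam_size=5)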