Python tensorflow.contrib.layers.python.layers.layers.linear() Examples

The following are 14 code examples of tensorflow.contrib.layers.python.layers.layers.linear(), each taken from an open-source project; the source file and license are noted above each snippet. layers.linear() is the fully connected layer of tf.contrib.layers with its activation function removed (activation_fn=None), so it applies a plain affine projection to its input.
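Every snippet below calls layers.linear() inside a variable scope, usually with biases_initializer=None so the projection has no bias term, and all of them assume the same imports (variable_scope, array_ops, and layers) plus module-internal helpers such as _create_attention_score_fn from their respective source files. As a minimal, self-contained sketch of a bare call (not taken from any of the projects below, and assuming a TensorFlow 1.x environment where tf.contrib is still available):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.ops import variable_scope

# A batch of 4 feature vectors of size 16; shapes are chosen for illustration.
inputs = tf.placeholder(tf.float32, shape=[4, 16])

with variable_scope.variable_scope("projection") as scope:
    # linear() is fully_connected() with activation_fn=None, i.e. an affine
    # projection. biases_initializer=None additionally drops the bias term,
    # leaving a pure matrix multiply, which is how most examples here use it.
    projected = layers.linear(inputs, 8, biases_initializer=None, scope=scope)

print(projected)  # a Tensor of shape (4, 8)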
Example #1
Source File: attention_decoder.py    From TransDG with MIT License
def prepare_attention(attention_states,
                      kd_states,
                      attention_option,
                      num_units,
                      reuse=False):
    # Prepare attention keys / values from attention_states
    with variable_scope.variable_scope("attn_keys", reuse=reuse) as scope:
        attention_keys = layers.linear(attention_states, num_units,
                                       biases_initializer=None, scope=scope)
        if kd_states is not None:
            attention_values = (attention_states, kd_states)
        else:
            attention_values = attention_states
        # Attention scoring function
        attention_score_fn = _create_attention_score_fn("attn_score", num_units, attention_option, reuse)

    # Attention construction function
    attention_construct_fn = _create_attention_construct_fn("attn_construct",
                                                            num_units, attention_score_fn, reuse)

    return attention_keys, attention_values, attention_construct_fn 
Example #2
Source File: my_attention_decoder_fn.py    From SentenceFunction with Apache License 2.0
def prepare_attention(attention_states,
                      attention_option,
                      num_units,
                      reuse=False):
  """Prepare keys/values/functions for attention.

  Args:
    attention_states: hidden states to attend over.
    attention_option: how to compute attention, either "luong" or "bahdanau".
    num_units: hidden state dimension.
    reuse: whether to reuse variable scope.

  Returns:
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
  """

  # Prepare attention keys / values from attention_states
  with variable_scope.variable_scope("attention_keys", reuse=reuse) as scope:
    attention_keys = layers.linear(
        attention_states, num_units, biases_initializer=None, scope=scope)
  attention_values = attention_states

  # Attention score function
  attention_score_fn = _create_attention_score_fn("attention_score", num_units,
                                                  attention_option, reuse)

  # Attention construction function
  attention_construct_fn = _create_attention_construct_fn("attention_construct",
                                                          num_units,
                                                          attention_score_fn,
                                                          reuse)

  return (attention_keys, attention_values, attention_score_fn,
          attention_construct_fn) 
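A hypothetical call to the function above (it would have to live in the same source file, since prepare_attention relies on module-internal helpers); the tensor shapes and names are assumptions for illustration:

import tensorflow as tf

# Encoder outputs to attend over: [batch_size, max_time, num_units].
encoder_outputs = tf.placeholder(tf.float32, shape=[None, 20, 256])
# Decoder cell output at the current step: [batch_size, num_units].
cell_output = tf.placeholder(tf.float32, shape=[None, 256])

keys, values, score_fn, construct_fn = prepare_attention(
    encoder_outputs, attention_option="bahdanau", num_units=256)

# construct_fn concatenates the query with its attention context and
# projects the result back to num_units through layers.linear().
attention = construct_fn(cell_output, keys, values)  # [batch_size, num_units]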
Example #3
Source File: my_attention_decoder_fn.py    From SentenceFunction with Apache License 2.0
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
  """Function to compute attention vectors.

  Args:
    name: to label variables.
    num_units: hidden state dimension.
    attention_score_fn: to compute similarity between key and target states.
    reuse: whether to reuse variable scope.

  Returns:
    attention_construct_fn: to build attention states.
  """
  with variable_scope.variable_scope(name, reuse=reuse) as scope:

    def construct_fn(attention_query, attention_keys, attention_values):
      context = attention_score_fn(attention_query, attention_keys,
                                   attention_values)
      concat_input = array_ops.concat([attention_query, context], 1)
      attention = layers.linear(
          concat_input, num_units, biases_initializer=None, scope=scope)
      return attention

    return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Example #4
Source File: attention_decoder_fn.py    From auto-alt-text-lambda-api with MIT License
def prepare_attention(attention_states,
                      attention_option,
                      num_units,
                      reuse=False):
  """Prepare keys/values/functions for attention.

  Args:
    attention_states: hidden states to attend over.
    attention_option: how to compute attention, either "luong" or "bahdanau".
    num_units: hidden state dimension.
    reuse: whether to reuse variable scope.

  Returns:
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
  """

  # Prepare attention keys / values from attention_states
  with variable_scope.variable_scope("attention_keys", reuse=reuse) as scope:
    attention_keys = layers.linear(
        attention_states, num_units, biases_initializer=None, scope=scope)
  attention_values = attention_states

  # Attention score function
  attention_score_fn = _create_attention_score_fn("attention_score", num_units,
                                                  attention_option, reuse)

  # Attention construction function
  attention_construct_fn = _create_attention_construct_fn("attention_construct",
                                                          num_units,
                                                          attention_score_fn,
                                                          reuse)

  return (attention_keys, attention_values, attention_score_fn,
          attention_construct_fn) 
Example #5
Source File: attention_decoder_fn.py    From auto-alt-text-lambda-api with MIT License
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
  """Function to compute attention vectors.

  Args:
    name: to label variables.
    num_units: hidden state dimension.
    attention_score_fn: to compute similarity between key and target states.
    reuse: whether to reuse variable scope.

  Returns:
    attention_construct_fn: to build attention states.
  """
  with variable_scope.variable_scope(name, reuse=reuse) as scope:

    def construct_fn(attention_query, attention_keys, attention_values):
      context = attention_score_fn(attention_query, attention_keys,
                                   attention_values)
      concat_input = array_ops.concat([attention_query, context], 1)
      attention = layers.linear(
          concat_input, num_units, biases_initializer=None, scope=scope)
      return attention

    return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Example #6
Source File: attention_decoder.py    From TransDG with MIT License
def create_output_fn(vocab_size):
    with variable_scope.variable_scope("output_fn") as scope:
        def output_fn(x):
            return layers.linear(x, vocab_size, scope=scope)

        return output_fn 
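A hedged usage sketch for the closure above; the vocabulary size and the decoder output tensor are made up for illustration. Unlike the attention projections, this call keeps the default bias of layers.linear():

import tensorflow as tf

output_fn = create_output_fn(vocab_size=40000)

# Hypothetical decoder cell output: [batch_size, num_units].
cell_output = tf.placeholder(tf.float32, shape=[None, 512])
logits = output_fn(cell_output)  # [batch_size, 40000], no activation applied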
Example #7
Source File: attention_decoder.py    From TransDG with MIT License
def create_hidden_fn(num_units):
    with variable_scope.variable_scope("hidden_fn") as scope:
        def hidden_fn(x):
            return layers.linear(x, num_units, scope=scope)

        return hidden_fn 
Example #8
Source File: attention_decoder.py    From TransDG with MIT License
def prepare_multistep_attention(encoder_states,
                                decoder_reprs,
                                kd_states1,
                                kd_states2,
                                attention_option,
                                num_units,
                                reuse=False):
    # Prepare attention keys / values from attention_states
    with variable_scope.variable_scope("attn_keys", reuse=reuse) as scope:
        attention_keys1 = layers.linear(encoder_states, num_units, biases_initializer=None, scope=scope)
        attention_values1 = encoder_states
        # Attention scoring function
        attention_score_fn1 = _create_attention_score_fn("attn_score", num_units,
                                                         attention_option, reuse)

    with variable_scope.variable_scope("attn_reprs", reuse=reuse) as scope:
        if decoder_reprs is not None:
            attention_keys2 = layers.linear(decoder_reprs, num_units, biases_initializer=None, scope=scope)
        else:
            attention_keys2 = None
        attention_values2 = decoder_reprs
        # Attention scoring function
        attention_score_fn2 = _create_attention_score_fn("attn_score", num_units,
                                                         attention_option, reuse)

    attention_keys = (attention_keys1, attention_keys2)
    if kd_states1 is not None and kd_states2 is not None:
        attention_values = (attention_values1, attention_values2, kd_states1, kd_states2)
    else:
        attention_values = (attention_values1, attention_values2, None, None)
    attention_score_fn = (attention_score_fn1, attention_score_fn2)

    # Attention construction function
    attention_construct_fn = _create_attention_construct_fn("attn_construct_multi",
                                                            num_units, attention_score_fn, reuse)

    return attention_keys, attention_values, attention_construct_fn 
Example #9
Source File: attention_decoder.py    From TransDG with MIT License
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
    """Function to compute attention vectors.
    Args:
        name: to label variables.
        num_units: hidden state dimension.
        attention_score_fn: to compute similarity between key and target states.
        reuse: whether to reuse variable scope.
    Returns:
        attention_construct_fn: to build attention states.
    """
    with variable_scope.variable_scope(name, reuse=reuse) as scope:

        def construct_fn(attention_query, attention_keys, attention_values):
            if isinstance(attention_score_fn, tuple):  # multi-step decoding
                attention_score_fn1, attention_score_fn2 = attention_score_fn
                attention_keys1, attention_keys2 = attention_keys
                attention_values1, decoder_reprs, kd_states1, kd_states2 = attention_values
                context1 = attention_score_fn1(attention_query, attention_keys1, attention_values1)
                if kd_states1 is None or kd_states2 is None:
                    context2 = attention_score_fn2(attention_query, attention_keys2, decoder_reprs)
                    concat_input = array_ops.concat([attention_query, context1, context2], 1)
                else:
                    if decoder_reprs is None:
                        concat_input = array_ops.concat([attention_query, context1, kd_states1, kd_states2], 1)
                    else:
                        context2 = attention_score_fn2(attention_query, attention_keys2, decoder_reprs)
                        concat_input = array_ops.concat([attention_query, context1, context2, kd_states1, kd_states2], 1)
            else:  # only one step decoding
                if isinstance(attention_values, tuple):
                    attention_values1, kd_state = attention_values
                    context1 = attention_score_fn(attention_query, attention_keys, attention_values1)
                    concat_input = array_ops.concat([attention_query, context1, kd_state], 1)
                else:
                    context = attention_score_fn(attention_query, attention_keys, attention_values)
                    concat_input = array_ops.concat([attention_query, context], 1)

            attention = layers.linear(concat_input, num_units, biases_initializer=None, scope=scope)
            return attention

        return construct_fn 
Example #10
Source File: attention_decoder_fn.py    From keras-lambda with MIT License
def prepare_attention(attention_states,
                      attention_option,
                      num_units,
                      reuse=False):
  """Prepare keys/values/functions for attention.

  Args:
    attention_states: hidden states to attend over.
    attention_option: how to compute attention, either "luong" or "bahdanau".
    num_units: hidden state dimension.
    reuse: whether to reuse variable scope.

  Returns:
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
  """

  # Prepare attention keys / values from attention_states
  with variable_scope.variable_scope("attention_keys", reuse=reuse) as scope:
    attention_keys = layers.linear(
        attention_states, num_units, biases_initializer=None, scope=scope)
  attention_values = attention_states

  # Attention score function
  attention_score_fn = _create_attention_score_fn("attention_score", num_units,
                                                  attention_option, reuse)

  # Attention construction function
  attention_construct_fn = _create_attention_construct_fn("attention_construct",
                                                          num_units,
                                                          attention_score_fn,
                                                          reuse)

  return (attention_keys, attention_values, attention_score_fn,
          attention_construct_fn) 
Example #11
Source File: attention_decoder_fn.py    From keras-lambda with MIT License
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
  """Function to compute attention vectors.

  Args:
    name: to label variables.
    num_units: hidden state dimension.
    attention_score_fn: to compute similarity between key and target states.
    reuse: whether to reuse variable scope.

  Returns:
    attention_construct_fn: to build attention states.
  """
  with variable_scope.variable_scope(name, reuse=reuse) as scope:

    def construct_fn(attention_query, attention_keys, attention_values):
      context = attention_score_fn(attention_query, attention_keys,
                                   attention_values)
      concat_input = array_ops.concat([attention_query, context], 1)
      attention = layers.linear(
          concat_input, num_units, biases_initializer=None, scope=scope)
      return attention

    return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Example #12
Source File: attention_decoder.py    From ccm with Apache License 2.0
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
    """Function to compute attention vectors.
    Args:
        name: to label variables.
        num_units: hidden state dimension.
        attention_score_fn: to compute similarity between key and target states.
        reuse: whether to reuse variable scope.
    Returns:
        attention_construct_fn: to build attention states.
    """
    with variable_scope.variable_scope(name, reuse=reuse) as scope:

        def construct_fn(attention_query, attention_keys, attention_values):
            alignments = None
            if type(attention_score_fn) is tuple:
                context0 = attention_score_fn[0](attention_query, attention_keys[0],
                                                 attention_values[0])
                if len(attention_keys) == 2:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1],
                                                     attention_values[1])
                elif len(attention_keys) == 3:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1:],
                                                     attention_values[1:])
                if type(context1) is tuple:
                    if len(context1) == 2:
                        context1, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1], 1)
                    elif len(context1) == 3:
                        context1, context2, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1, context2], 1)
                else:
                    concat_input = array_ops.concat([attention_query, context0, context1], 1)
            else:
                context = attention_score_fn(attention_query, attention_keys,
                                             attention_values)
                concat_input = array_ops.concat([attention_query, context], 1)
            attention = layers.linear(
                    concat_input, num_units, biases_initializer=None, scope=scope)
            if alignments is None:
                return attention
            else:
                return attention, alignments

        return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Example #13
Source File: attention_decoder.py    From ccm with Apache License 2.0
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
    """Function to compute attention vectors.
    Args:
        name: to label variables.
        num_units: hidden state dimension.
        attention_score_fn: to compute similarity between key and target states.
        reuse: whether to reuse variable scope.
    Returns:
        attention_construct_fn: to build attention states.
    """
    with variable_scope.variable_scope(name, reuse=reuse) as scope:

        def construct_fn(attention_query, attention_keys, attention_values):
            alignments = None
            if type(attention_score_fn) is tuple:
                context0 = attention_score_fn[0](attention_query, attention_keys[0],
                                                 attention_values[0])
                if len(attention_keys) == 2:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1],
                                                     attention_values[1])
                elif len(attention_keys) == 3:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1:],
                                                     attention_values[1:])
                if type(context1) is tuple:
                    if len(context1) == 2:
                        context1, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1], 1)
                    elif len(context1) == 3:
                        context1, context2, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1, context2], 1)
                else:
                    concat_input = array_ops.concat([attention_query, context0, context1], 1)
            else:
                context = attention_score_fn(attention_query, attention_keys,
                                             attention_values)
                concat_input = array_ops.concat([attention_query, context], 1)
            attention = layers.linear(
                    concat_input, num_units, biases_initializer=None, scope=scope)
            if alignments is None:
                return attention
            else:
                return attention, alignments

        return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Example #14
Source File: attention_decoder.py    From ccm with Apache License 2.0
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
    """Function to compute attention vectors.
    Args:
        name: to label variables.
        num_units: hidden state dimension.
        attention_score_fn: to compute similarity between key and target states.
        reuse: whether to reuse variable scope.
    Returns:
        attention_construct_fn: to build attention states.
    """
    with variable_scope.variable_scope(name, reuse=reuse) as scope:

        def construct_fn(attention_query, attention_keys, attention_values):
            alignments = None
            if type(attention_score_fn) is tuple:
                context0 = attention_score_fn[0](attention_query, attention_keys[0],
                                                 attention_values[0])
                if len(attention_keys) == 2:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1],
                                                     attention_values[1])
                elif len(attention_keys) == 3:
                    context1 = attention_score_fn[1](attention_query, attention_keys[1:],
                                                     attention_values[1:])
                if type(context1) is tuple:
                    if len(context1) == 2:
                        context1, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1], 1)
                    elif len(context1) == 3:
                        context1, context2, alignments = context1
                        concat_input = array_ops.concat([attention_query, context0, context1, context2], 1)
                else:
                    concat_input = array_ops.concat([attention_query, context0, context1], 1)
            else:
                context = attention_score_fn(attention_query, attention_keys,
                                             attention_values)
                concat_input = array_ops.concat([attention_query, context], 1)
            attention = layers.linear(
                    concat_input, num_units, biases_initializer=None, scope=scope)
            if alignments is None:
                return attention
            else:
                return attention, alignments

        return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length]