Python tensorflow.python.ops.init_ops.zeros_initializer() Examples

The following are 30 code examples of tensorflow.python.ops.init_ops.zeros_initializer(), drawn from a range of open-source projects. Each example lists its source file and the project it comes from. You may also want to check out the other available functions and classes of the tensorflow.python.ops.init_ops module.
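Before the project examples, here is a minimal sketch of the pattern they all share: passing init_ops.zeros_initializer() as the initializer of a variable so that it starts out filled with zeros. It assumes the TensorFlow 1.x graph-mode API, reachable as tf.compat.v1 on TensorFlow 2.x; the variable name and shape are illustrative.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import init_ops

tf.disable_eager_execution()  # graph mode, as in the examples below

# A bias-like variable initialized to zeros, mirroring the
# `initializer=` / `biases_initializer=` arguments used throughout the examples.
bias = tf.get_variable(
    "bias",
    shape=[10],
    dtype=tf.float32,
    initializer=init_ops.zeros_initializer(),  # same effect as tf.zeros_initializer()
    trainable=True)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(bias))  # ten zeros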
Example #1
Source File: vgg.py    From auto-alt-text-lambda-api with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
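As a hedged usage sketch (the placeholder input and layer name are illustrative, not part of the original project): the arg_scope returned above is meant to be re-entered when building the network, so every conv2d / fully_connected call inside it picks up the ReLU activation, the L2 weight regularizer, and the zero bias initializer. This assumes TF 1.x with tf.contrib available.

import tensorflow as tf
from tensorflow.contrib.framework import arg_scope
from tensorflow.contrib import layers

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  # Inherits activation_fn=relu, the L2 weight regularizer, zero-initialized
  # biases, and padding='SAME' from the scope defined above.
  net = layers.conv2d(images, 64, [3, 3], scope='conv1_1')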
Example #2
Source File: rebar.py    From yolo_v2 with Apache License 2.0
def _create_baseline(self, n_output=1, n_hidden=100,
                       is_zero_init=False,
                       collection='BASELINE'):
    # center input
    h = self._x
    if self.mean_xs is not None:
      h -= self.mean_xs

    if is_zero_init:
      initializer = init_ops.zeros_initializer()
    else:
      initializer = slim.variance_scaling_initializer()

    with slim.arg_scope([slim.fully_connected],
                        variables_collections=[collection, Q_COLLECTION],
                        trainable=False,
                        weights_initializer=initializer):
      h = slim.fully_connected(h, n_hidden, activation_fn=tf.nn.tanh)
      baseline = slim.fully_connected(h, n_output, activation_fn=None)

      if n_output == 1:
        baseline = tf.reshape(baseline, [-1])  # very important to reshape
    return baseline 
Example #3
Source File: vgg.py    From tf-slim with Apache License 2.0
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #4
Source File: vgg.py    From lambda-packs with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #5
Source File: rebar.py    From Gun-Detector with Apache License 2.0
def _create_baseline(self, n_output=1, n_hidden=100,
                       is_zero_init=False,
                       collection='BASELINE'):
    # center input
    h = self._x
    if self.mean_xs is not None:
      h -= self.mean_xs

    if is_zero_init:
      initializer = init_ops.zeros_initializer()
    else:
      initializer = slim.variance_scaling_initializer()

    with slim.arg_scope([slim.fully_connected],
                        variables_collections=[collection, Q_COLLECTION],
                        trainable=False,
                        weights_initializer=initializer):
      h = slim.fully_connected(h, n_hidden, activation_fn=tf.nn.tanh)
      baseline = slim.fully_connected(h, n_output, activation_fn=None)

      if n_output == 1:
        baseline = tf.reshape(baseline, [-1])  # very important to reshape
    return baseline 
Example #6
Source File: truncated_vgg.py    From Table-Detection-using-Deep-learning with BSD 3-Clause "New" or "Revised" License
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc 
Example #7
Source File: core.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.activity_regularizer = activity_regularizer 
Example #8
Source File: optimizers.py    From deep_image_model with Apache License 2.0
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor*std)
    return max_norms, mean 
Example #9
Source File: core.py    From lambda-packs with MIT License
def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.activity_regularizer = activity_regularizer
    self.input_spec = base.InputSpec(min_ndim=2) 
Example #10
Source File: evaluation.py    From lambda-packs with MIT License
def _get_or_create_eval_step():
  """Gets or creates the eval step `Tensor`.

  Returns:
    A `Tensor` representing a counter for the evaluation step.

  Raises:
    ValueError: If multiple `Tensors` have been added to the
      `tf.GraphKeys.EVAL_STEP` collection.
  """
  graph = ops.get_default_graph()
  eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
  if len(eval_steps) == 1:
    return eval_steps[0]
  elif len(eval_steps) > 1:
    raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
  else:
    counter = variable_scope.get_variable(
        'eval_step',
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
    return counter 
Example #11
Source File: feature_column.py    From lambda-packs with MIT License
def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weights',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum') 
Example #12
Source File: variables.py    From deep_image_model with Apache License 2.0
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64,
                    initializer=init_ops.zeros_initializer, trainable=False,
                    collections=collections) 
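Note that a few examples above and below (the moving_average helper in Example #8, create_global_step here in Example #12, and the _wide_embedding_lookup_arguments variants in Examples #16, #18 and #19) pass init_ops.zeros_initializer without calling it. In the TF 1.x variable machinery, get_variable appears to accept either spelling, instantiating a bare Initializer class internally before use. The short sketch below, again assuming tf.compat.v1 graph mode, shows both forms producing the same zero-filled variable.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import init_ops

tf.disable_eager_execution()

# Instance form, as in most of the examples on this page.
a = tf.get_variable("a", shape=[3], initializer=init_ops.zeros_initializer())
# Bare-class form, as in create_global_step above; get_variable instantiates it.
b = tf.get_variable("b", shape=[3], initializer=init_ops.zeros_initializer)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run([a, b]))  # both evaluate to [0., 0., 0.]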
Example #13
Source File: vgg16.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #14
Source File: mru.py    From SketchySceneColorization with MIT License
def fully_connected(inputs, num_outputs, sn, activation_fn=None,
                    normalizer_fn=None, normalizer_params=None,
                    weights_initializer=ly.xavier_initializer(),
                    weight_decay_rate=1e-6,
                    biases_initializer=init_ops.zeros_initializer(),
                    biases_regularizer=None,
                    reuse=None, scope=None):
    # TODO move regularizer definitions to model
    weights_regularizer = ly.l2_regularizer(weight_decay_rate)

    input_dim = inputs.get_shape().as_list()[1]

    with tf.variable_scope(scope, 'fully_connected', [inputs], reuse=reuse) as sc:
        inputs = tf.convert_to_tensor(inputs)

        weights = tf.get_variable(name="weights", shape=(input_dim, num_outputs),
                                  initializer=weights_initializer, regularizer=weights_regularizer,
                                  trainable=True, dtype=inputs.dtype.base_dtype)

        # Spectral Normalization
        if sn:
            weights = spectral_normed_weight(weights, num_iters=1, update_collection=Config.SPECTRAL_NORM_UPDATE_OPS)

        linear_out = tf.matmul(inputs, weights)

        if biases_initializer is not None:
            biases = tf.get_variable(name="biases", shape=(num_outputs,),
                                     initializer=biases_initializer, regularizer=biases_regularizer,
                                     trainable=True, dtype=inputs.dtype.base_dtype)
            # Add the bias only when a bias variable was actually created, so
            # `biases` is never referenced when biases_initializer is None.
            linear_out = tf.nn.bias_add(linear_out, biases)

        # Apply normalizer function / layer.
        if normalizer_fn is not None:
            normalizer_params = normalizer_params or {}
            linear_out = normalizer_fn(linear_out, activation_fn=None, **normalizer_params)

        if activation_fn is not None:
            linear_out = activation_fn(linear_out)

    return linear_out 
Example #15
Source File: feature_column.py    From auto-alt-text-lambda-api with MIT License
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        vocab_size=self.length * self.source_column.dimension,
        initializer=init_ops.zeros_initializer(),
        combiner="sum") 
Example #16
Source File: feature_column.py    From deep_image_model with Apache License 2.0
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.sparse_id_column.combiner) 
Example #17
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def _create_global_step(graph):
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) 
Example #18
Source File: feature_column.py    From deep_image_model with Apache License 2.0
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner) 
Example #19
Source File: feature_column.py    From deep_image_model with Apache License 2.0
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        vocab_size=self.length * self.source_column.dimension,
        initializer=init_ops.zeros_initializer,
        combiner="sum") 
Example #20
Source File: tpu_estimator.py    From transformer-xl with Apache License 2.0
def _create_global_step(graph):
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) 
Example #21
Source File: layers.py    From tf-slim with Apache License 2.0
def convolution1d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=1) 
Example #22
Source File: histogram_ops.py    From auto-alt-text-lambda-api with MIT License
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        shape=[nbins],
        dtype=hist_true.dtype,
        initializer=init_ops.zeros_initializer(),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        shape=[nbins],
        dtype=hist_true.dtype,
        initializer=init_ops.zeros_initializer(),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op 
Example #23
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _create_global_step(graph):
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) 
Example #24
Source File: layers.py    From STGAN with MIT License
def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope) 
Example #25
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _centered_bias(logits_dimension, head_name=None):
  """Returns `logits`, optionally with centered bias applied.

  Args:
    logits_dimension: Last dimension of `logits`. Must be >= 1.
    head_name: Optional name of the head.

  Returns:
    Centered bias `Variable`.

  Raises:
    ValueError: if `logits_dimension` is invalid.
  """
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  centered_bias = variable_scope.get_variable(
      name="centered_bias_weight",
      shape=(logits_dimension,),
      initializer=init_ops.zeros_initializer(),
      trainable=True)
  for dim in range(logits_dimension):
    if head_name:
      summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
                     centered_bias[dim])
    else:
      summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
  return centered_bias 
Example #26
Source File: layers.py    From STGAN with MIT License
def flatten_fully_connected_v1(inputs,
                               num_outputs,
                               activation_fn=tf.nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_initializer=slim.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=tf.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected_v1'):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope) 
Example #27
Source File: layers.py    From STGAN with MIT License
def flatten_fully_connected_v2(inputs,
                               num_outputs,
                               activation_fn=nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_normalizer_fn=None,
                               weights_normalizer_params=None,
                               weights_initializer=initializers.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=init_ops.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with variable_scope.variable_scope(scope, 'flatten_fully_connected_v2'):
        if inputs.shape.ndims > 2:
            inputs = layers.flatten(inputs)
        return fully_connected(inputs=inputs,
                               num_outputs=num_outputs,
                               activation_fn=activation_fn,
                               normalizer_fn=normalizer_fn,
                               normalizer_params=normalizer_params,
                               weights_normalizer_fn=weights_normalizer_fn,
                               weights_normalizer_params=weights_normalizer_params,
                               weights_initializer=weights_initializer,
                               weights_regularizer=weights_regularizer,
                               biases_initializer=biases_initializer,
                               biases_regularizer=biases_regularizer,
                               reuse=reuse,
                               variables_collections=variables_collections,
                               outputs_collections=outputs_collections,
                               trainable=trainable,
                               scope=scope) 
Example #28
Source File: variables.py    From auto-alt-text-lambda-api with MIT License
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections) 
Example #29
Source File: feature_column.py    From auto-alt-text-lambda-api with MIT License
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner) 
Example #30
Source File: feature_column.py    From auto-alt-text-lambda-api with MIT License
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner)