Python tensorflow.python.ops.array_ops.stack() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.stack(). The original project and source file for each example are noted above it. You may also want to check out all available functions/classes of the module tensorflow.python.ops.array_ops, or try the search function.
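As a quick orientation before the examples: array_ops.stack packs a list of tensors with identical shapes into one tensor along a new axis and is the internal counterpart of the public tf.stack. A minimal sketch of its behavior, assuming a TensorFlow 1.x graph-mode setup (the tensor values are illustrative only):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

# Three scalars become a single 1-D tensor of shape [3].
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
packed = array_ops.stack([x, y, z])         # [1, 2, 3]

# With axis=1, two length-2 vectors become the columns of a [2, 2] matrix.
a = constant_op.constant([1, 2])
b = constant_op.constant([3, 4])
pairs = array_ops.stack([a, b], axis=1)     # [[1, 3], [2, 4]]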
Example #1
Source File: array_grad.py    From lambda-packs with MIT License
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None] 
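The interleaving step above is just stack followed by transpose and a flattening reshape. A small standalone sketch of that step using the shapes from the comment (hypothetical values, not part of the gradient code):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

multiples = constant_op.constant([2, 3, 4])
input_shape = constant_op.constant([20, 30, 40])

# stack     -> [[2, 3, 4], [20, 30, 40]]
# transpose -> [[2, 20], [3, 30], [4, 40]]
# reshape   -> [2, 20, 3, 30, 4, 40], i.e. the interleaved split_shape.
split_shape = array_ops.reshape(
    array_ops.transpose(array_ops.stack([multiples, input_shape])), [-1])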
Example #2
Source File: tfexample_decoder.py    From lambda-packs with MIT License
def tensors_to_item(self, keys_to_tensors):
    tensor = keys_to_tensors[self._tensor_key]
    shape = self._shape
    if self._shape_keys:
      shape_dims = []
      for k in self._shape_keys:
        shape_dim = keys_to_tensors[k]
        if isinstance(shape_dim, sparse_tensor.SparseTensor):
          shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
        shape_dims.append(shape_dim)
      shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])
    if isinstance(tensor, sparse_tensor.SparseTensor):
      if shape is not None:
        tensor = sparse_ops.sparse_reshape(tensor, shape)
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    else:
      if shape is not None:
        tensor = array_ops.reshape(tensor, shape)
    return tensor 
Example #3
Source File: tfexample_decoder.py    From lambda-packs with MIT License
def tensors_to_item(self, keys_to_tensors):
    indices = keys_to_tensors[self._indices_key]
    values = keys_to_tensors[self._values_key]
    if self._shape_key:
      shape = keys_to_tensors[self._shape_key]
      if isinstance(shape, sparse_tensor.SparseTensor):
        shape = sparse_ops.sparse_tensor_to_dense(shape)
    elif self._shape:
      shape = self._shape
    else:
      shape = indices.dense_shape
    indices_shape = array_ops.shape(indices.indices)
    rank = indices_shape[1]
    ids = math_ops.to_int64(indices.values)
    indices_columns_to_preserve = array_ops.slice(
        indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
    new_indices = array_ops.concat(
        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

    tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
    if self._densify:
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    return tensor 
Example #4
Source File: array_grad.py    From auto-alt-text-lambda-api with MIT License
def _SliceGrad(op, grad):
  """Gradient for Slice op."""
  # Create an Nx2 padding where the first column represents how many
  # zeros are to be prepended for each dimension, and the second
  # column indicates how many zeros are appended.
  #
  # The number of zeros to append is the shape of the input
  # elementwise-subtracted by both the begin vector and sizes vector.
  #
  # Some more reshaping is needed to assemble this tensor with the
  # right dimensions.
  input_vec = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(input_vec)
  slice_size = array_ops.shape(op.outputs[0])

  shape = array_ops.stack([input_rank, 1])
  before_pad = array_ops.reshape(begin_vec, shape)
  after_pad = array_ops.reshape(
      array_ops.shape(input_vec) - slice_size - begin_vec, shape)
  paddings = array_ops.concat([before_pad, after_pad], 1)
  return array_ops.pad(grad, paddings), None, None 
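For intuition: slicing a length-10 vector with begin=[3] and size=[4] drops 3 leading and 3 trailing elements, so the paddings built above are [[3, 3]] and the gradient is padded back to length 10. A hedged standalone sketch of that final pad (hypothetical values):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

grad = constant_op.constant([1.0, 2.0, 3.0, 4.0])   # gradient of the slice
paddings = constant_op.constant([[3, 3]])           # zeros before and after
restored = array_ops.pad(grad, paddings)            # shape [10]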
Example #5
Source File: array_grad.py    From auto-alt-text-lambda-api with MIT License
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None] 
Example #6
Source File: array_grad.py    From auto-alt-text-lambda-api with MIT License
def _PadGrad(op, grad):
  """Gradient for Pad."""
  # Pad introduces values around the original tensor, so the gradient function
  # slices the original shape out of the gradient.
  x = op.inputs[0]
  a = op.inputs[1]  # [Rank(x), 2]
  # Takes a slice of a. The 1st column. [Rank(x), 1].
  pad_before = array_ops.slice(a, [0, 0],
                               array_ops.stack([array_ops.rank(x), 1]))
  # Make it a 1-D tensor.
  begin = array_ops.reshape(pad_before, [-1])
  sizes = array_ops.shape(x)
  return array_ops.slice(grad, begin, sizes), None


# ReverseSequence is just a permutation.  The gradient permutes back. 
Example #7
Source File: backend.py    From lambda-packs with MIT License
def repeat(x, n):
  """Repeats a 2D tensor.

  If `x` has shape `(samples, dim)` and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.

  Arguments:
      x: Tensor or variable.
      n: Python integer, number of times to repeat.

  Returns:
      A tensor.
  """
  assert ndim(x) == 2
  x = array_ops.expand_dims(x, 1)
  pattern = array_ops.stack([1, n, 1])
  return array_ops.tile(x, pattern) 
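A hedged usage sketch of the same expand_dims/tile pattern outside the Keras backend wrapper (the input values are illustrative):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])        # (samples=2, dim=2)
x = array_ops.expand_dims(x, 1)                           # (2, 1, 2)
repeated = array_ops.tile(x, array_ops.stack([1, 3, 1]))  # (2, 3, 2)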
Example #8
Source File: array_grad.py    From lambda-packs with MIT License
def _PadGrad(op, grad):
  """Gradient for Pad."""
  # Pad introduces values around the original tensor, so the gradient function
  # slices the original shape out of the gradient.
  x = op.inputs[0]
  a = op.inputs[1]  # [Rank(x), 2]
  # Takes a slice of a. The 1st column. [Rank(x), 1].
  pad_before = array_ops.slice(a, [0, 0],
                               array_ops.stack([array_ops.rank(x), 1]))
  # Make it a 1-D tensor.
  begin = array_ops.reshape(pad_before, [-1])
  sizes = array_ops.shape(x)
  return array_ops.slice(grad, begin, sizes), None


# ReverseSequence is just a permutation.  The gradient permutes back. 
Example #9
Source File: array_grad.py    From lambda-packs with MIT License
def _SliceGrad(op, grad):
  """Gradient for Slice op."""
  # Create an Nx2 padding where the first column represents how many
  # zeros are to be prepended for each dimension, and the second
  # column indicates how many zeros are appended.
  #
  # The number of zeros to append is the shape of the input
  # elementwise-subtracted by both the begin vector and sizes vector.
  #
  # Some more reshaping is needed to assemble this tensor with the
  # right dimensions.
  input_vec = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(input_vec)
  slice_size = array_ops.shape(op.outputs[0])

  shape = array_ops.stack([input_rank, 1])
  before_pad = array_ops.reshape(begin_vec, shape)
  after_pad = array_ops.reshape(
      array_ops.shape(input_vec) - slice_size - begin_vec, shape)
  paddings = array_ops.concat([before_pad, after_pad], 1)
  return array_ops.pad(grad, paddings), None, None 
Example #10
Source File: spectral_ops.py    From lambda-packs with MIT License
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
  """Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
  # A TensorShape for the inner fft_rank dimensions.
  fft_shape = input_tensor.get_shape()[-fft_rank:]

  # If any dim is unknown, fall back to tensor-based math.
  if not fft_shape.is_fully_defined():
    fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
    fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
    return _array_ops.stack(fft_length)

  # Otherwise, return a constant.
  fft_length = fft_shape.as_list()
  if fft_length:
    fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
  return _ops.convert_to_tensor(fft_length, _dtypes.int32) 
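The dynamic branch uses a common unstack/modify/stack idiom for editing one entry of a runtime shape. A hedged standalone sketch of that pattern (the input tensor is illustrative, and the edit is applied to the last dimension only):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

signal = constant_op.constant([[0.0, 1.0, 2.0, 3.0, 4.0]])   # last dim = 5
dims = array_ops.unstack(array_ops.shape(signal))            # list of scalar dims
dims[-1] = math_ops.maximum(0, 2 * (dims[-1] - 1))           # 2 * (5 - 1) = 8
fft_length = array_ops.stack(dims)                           # [1, 8]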
Example #11
Source File: core.py    From auto-alt-text-lambda-api with MIT License
def call(self, inputs):
    shape = inputs.get_shape().as_list()
    input_dim = shape[-1]
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Reshape the input to 2D.
      output_shape_tensors = array_ops.unstack(array_ops.shape(inputs))
      output_shape_tensors[-1] = self.units
      output_shape_tensor = array_ops.stack(output_shape_tensors)
      inputs = array_ops.reshape(inputs, [-1, input_dim])

    outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)

    if len(output_shape) > 2:
      # Reshape the output back to the original ndim of the input.
      outputs = array_ops.reshape(outputs, output_shape_tensor)
      outputs.set_shape(output_shape)

    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs 
Example #12
Source File: tfexample_decoder.py    From auto-alt-text-lambda-api with MIT License
def tensors_to_item(self, keys_to_tensors):
    tensor = keys_to_tensors[self._tensor_key]
    shape = self._shape
    if self._shape_keys:
      shape_dims = []
      for k in self._shape_keys:
        shape_dim = keys_to_tensors[k]
        if isinstance(shape_dim, sparse_tensor.SparseTensor):
          shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
        shape_dims.append(shape_dim)
      shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])
    if isinstance(tensor, sparse_tensor.SparseTensor):
      if shape is not None:
        tensor = sparse_ops.sparse_reshape(tensor, shape)
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    else:
      if shape is not None:
        tensor = array_ops.reshape(tensor, shape)
    return tensor 
Example #13
Source File: tfexample_decoder.py    From auto-alt-text-lambda-api with MIT License
def tensors_to_item(self, keys_to_tensors):
    indices = keys_to_tensors[self._indices_key]
    values = keys_to_tensors[self._values_key]
    if self._shape_key:
      shape = keys_to_tensors[self._shape_key]
      if isinstance(shape, sparse_tensor.SparseTensor):
        shape = sparse_ops.sparse_tensor_to_dense(shape)
    elif self._shape:
      shape = self._shape
    else:
      shape = indices.dense_shape
    indices_shape = array_ops.shape(indices.indices)
    rank = indices_shape[1]
    ids = math_ops.to_int64(indices.values)
    indices_columns_to_preserve = array_ops.slice(
        indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
    new_indices = array_ops.concat(
        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

    tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
    if self._densify:
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    return tensor 
Example #14
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def grad_sync(self):
    """A control trigger node for synchronization in the grad loop.

    One main use is to keep the pop ops of a stack executed in the
    iteration order.
    """
    if self._grad_sync is None:
      with ops.control_dependencies(None):
        self._grad_sync = control_trigger(name="b_sync")
      self._grad_sync._set_control_flow_context(self._grad_context)
      self._grad_index.op._add_control_input(self._grad_sync)
    return self._grad_sync 
Example #15
Source File: nn_grad.py    From auto-alt-text-lambda-api with MIT License
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t. the input of
    TopK, and the second being the gradient w.r.t. the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])

  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)] 
Example #16
Source File: gmm_ops.py    From auto-alt-text-lambda-api with MIT License
def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = array_ops.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = array_ops.diag(self._covs[c, :])
        inverse = linalg_ops.matrix_inverse(cov + self._min_var)
        inv_cov = array_ops.tile(
            array_ops.expand_dims(inverse, 0),
            array_ops.stack([self._num_examples, 1, 1]))
        diff = array_ops.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = math_ops.matmul(diff, inv_cov)
        all_scores.append(
            math_ops.sqrt(
                math_ops.matmul(
                    m_left, array_ops.transpose(
                        diff, perm=[0, 2, 1]))))
      self._all_scores.append(
          array_ops.reshape(
              array_ops.concat(all_scores, 1),
              array_ops.stack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = array_ops.concat(self._all_scores, 0)
    assignments = array_ops.concat(self.assignments(), 0)
    rows = math_ops.to_int64(math_ops.range(0, self._num_examples))
    indices = array_ops.concat(
        [array_ops.expand_dims(rows, 1), array_ops.expand_dims(assignments, 1)],
        1)
    self._scores = array_ops.gather_nd(self._all_scores, indices) 
Example #17
Source File: array_grad.py    From auto-alt-text-lambda-api with MIT License
def _UnpackGrad(op, *grads):
  """Gradient for unpack op."""
  return array_ops.stack(grads, axis=op.get_attr("axis")) 
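This one-liner works because stack and unstack are inverses along the chosen axis, so the gradient of unpacking a tensor into slices is simply re-packing the slice gradients. A minimal round-trip sketch (hypothetical values):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

m = constant_op.constant([[1, 2], [3, 4], [5, 6]])   # shape [3, 2]
rows = array_ops.unstack(m, axis=0)                  # three tensors of shape [2]
rebuilt = array_ops.stack(rows, axis=0)              # shape [3, 2] again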
Example #18
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def forward_sync(self):
    """A control trigger node for synchronization in the forward loop.

    One main use is to keep the push ops of a stack executed in the
    iteration order.
    """
    if self._forward_sync is None:
      with ops.control_dependencies(None):
        self._forward_sync = control_trigger(name="f_sync")
      self._forward_sync._set_control_flow_context(self._forward_context)
      self._forward_index.op._add_control_input(self._forward_sync)
    return self._forward_sync 
Example #19
Source File: fused_rnn_cell.py    From auto-alt-text-lambda-api with MIT License
def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
        inputs = array_ops.stack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unstack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unstack(inputs)
      outputs, state = contrib_rnn.static_rnn(self._cell,
                                              inputs,
                                              initial_state=initial_state,
                                              dtype=dtype,
                                              sequence_length=sequence_length,
                                              scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.stack(outputs)

    return outputs, state 
Example #20
Source File: ops.py    From lambda-packs with MIT License
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Pack tensors along a new axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
                       labeled_tensors)

    axes_0 = labeled_tensors[0].axes
    for t in labeled_tensors:
      if t.axes != axes_0:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (axes_0, t.axes))

    pack_op = array_ops.stack(
        [t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
    axes = list(axes_0.values())
    axes.insert(axis_position, new_axis)
    return core.LabeledTensor(pack_op, axes) 
Example #21
Source File: gmm_ops.py    From auto-alt-text-lambda-api with MIT License
def _define_expectation_operation(self, shard_id):
    # Shape broadcasting.
    probs = array_ops.expand_dims(self._probs[shard_id], 0)
    # Membership weights are computed as:
    # w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}
    #               {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)}
    # where "i" is the i-th example, "k" is the k-th mixture, theta are
    # the model parameters and y_i the observations.
    # These are defined for each shard.
    self._w[shard_id] = array_ops.reshape(
        math_ops.exp(probs - self._prior_probs[shard_id]),
        array_ops.stack([self._num_examples, self._num_classes])) 
Example #22
Source File: variables.py    From auto-alt-text-lambda-api with MIT License
def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Adds ops to list the names of uninitialized variables.

  When run, it returns a 1-D tensor containing the names of uninitialized
  variables if there are any, or an empty array if there are none.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `global_variables() + local_variables()`
    name: Optional name of the `Operation`.

  Returns:
    A 1-D tensor containing names of the uninitialized variables, or an empty
    1-D tensor if there are no variables or no uninitialized variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
    # Backwards compatibility for old-style variables. TODO(touts): remove.
    if not var_list:
      var_list = []
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  with ops.name_scope(name):
    if not var_list:
      # Return an empty tensor so we only need to check for returned tensor
      # size being 0 as an indication of model ready.
      return array_ops.constant([], dtype=dtypes.string)
    else:
      # Get a 1-D boolean tensor listing whether each variable is initialized.
      variables_mask = math_ops.logical_not(
          array_ops.stack(
              [state_ops.is_variable_initialized(v) for v in var_list]))
      # Get a 1-D string tensor containing all the variable names.
      variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
      # Return a 1-D tensor containing all the names of uninitialized variables.
      return array_ops.boolean_mask(variable_names_tensor, variables_mask)

# pylint: disable=protected-access 
Example #23
Source File: mixture.py    From lambda-packs with MIT License
def _log_cdf(self, x):
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_cdfs = [
          cat_lp + d_lcdf
          for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
      ]
      concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
      mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
      return mixture_log_cdf 
Example #24
Source File: feature_column.py    From lambda-packs with MIT License
def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="reshape")
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
    sparse_id_values = sparse_tensor_py.SparseTensor(
        indices, bucket_indices, shape)

    return sparse_id_values 
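The transpose(stack((i1, i2))) idiom near the end pairs the two index vectors column-wise into an [N, 2] indices matrix for the SparseTensor. A small standalone illustration with hypothetical index values:

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

i1 = constant_op.constant([0, 1, 2])   # row (example) indices
i2 = constant_op.constant([0, 0, 0])   # column (dimension) indices
# stack     -> [[0, 1, 2], [0, 0, 0]]
# transpose -> [[0, 0], [1, 0], [2, 0]]
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))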
Example #25
Source File: seq2seq_ops.py    From lambda-packs with MIT License
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
  """Processes inputs for Sequence to Sequence models.

  Args:
    x: Input Tensor [batch_size, input_length, embed_dim].
    y: Output Tensor [batch_size, output_length, embed_dim].
    input_length: length of input x.
    output_length: length of output y.
    sentinel: Optional first input to the decoder and final expected output.
      If sentinel is not provided, zeros are used. Because y is not available
      at sampling time, the shape of the sentinel is inferred from x.
    name: Operation name.

  Returns:
    Encoder input from x, and decoder inputs and outputs from y.
  """
  with ops.name_scope(name, "seq2seq_inputs", [x, y]):
    in_x = array_ops.unstack(x, axis=1)
    y = array_ops.unstack(y, axis=1)
    if not sentinel:
      # Set to zeros of shape of y[0], using x for batch size.
      sentinel_shape = array_ops.stack(
          [array_ops.shape(x)[0], y[0].get_shape()[1]])
      sentinel = array_ops.zeros(sentinel_shape)
      sentinel.set_shape(y[0].get_shape())
    in_y = [sentinel] + y
    out_y = y + [sentinel]
    return in_x, in_y, out_y 
Example #26
Source File: seq2seq_ops.py    From lambda-packs with MIT License
def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
  """Returns predictions and loss for sequence of predictions.

  Args:
    decoding: List of Tensors with predictions.
    labels: List of Tensors with labels.
    sampling_decoding: Optional list of Tensors with predictions to be used
      in sampling, e.g. they should not have a dependency on the outputs.
      If not provided, decoding is used.
    name: Operation name.

  Returns:
    Predictions and losses tensors.
  """
  with ops.name_scope(name, "sequence_classifier", [decoding, labels]):
    predictions, xent_list = [], []
    for i, pred in enumerate(decoding):
      xent_list.append(nn.softmax_cross_entropy_with_logits(
          labels=labels[i], logits=pred,
          name="sequence_loss/xent_raw{0}".format(i)))
      if sampling_decoding:
        predictions.append(nn.softmax(sampling_decoding[i]))
      else:
        predictions.append(nn.softmax(pred))
    xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
    loss = math_ops.reduce_sum(xent, name="sequence_loss")
    return array_ops.stack(predictions, axis=1), loss 
Example #27
Source File: lstm1d.py    From lambda-packs with MIT License
def ndlstm_base_unrolled(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using unrolling and the TensorFlow
  LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)

  """
  with variable_scope.variable_scope(scope, "SeqLstmUnrolled", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    output_u = []
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm_cell(inputs_u[i], state)
      output_u += [output]
    if reverse:
      output_u = list(reversed(output_u))
    outputs = array_ops.stack(output_u)
    return outputs 
Example #28
Source File: tensor_forest.py    From lambda-packs with MIT License
def feature_importances(self):
    tree_counts = [self.trees[i].feature_usage_counts()
                   for i in range(self.params.num_trees)]
    total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
    return total_counts / math_ops.reduce_sum(total_counts) 
Example #29
Source File: tensor_forest.py    From lambda-packs with MIT License
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.variables.device_dummies[i].device):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))

  # pylint: disable=unused-argument 
Example #30
Source File: tensor_forest.py    From lambda-packs with MIT License
def inference_graph(self, input_data, **inference_args):
    """Constructs a TF graph for evaluating a random forest.

    Args:
      input_data: A tensor or dict of string->Tensor for input data.
      **inference_args: Keyword arguments to pass through to each tree.

    Returns:
      The last op in the random forest inference graph.

    Raises:
      NotImplementedError: If trying to use feature bagging with sparse
        features.
    """
    processed_dense_features, processed_sparse_features, data_spec = (
        data_ops.ParseDataTensorOrDict(input_data))

    probabilities = []
    for i in range(self.params.num_trees):
      with ops.device(self.variables.device_dummies[i].device):
        tree_data = processed_dense_features
        if self.params.bagged_features:
          if processed_sparse_features is not None:
            raise NotImplementedError(
                'Feature bagging not supported with sparse features.')
          tree_data = self._bag_features(i, input_data)
        probabilities.append(self.trees[i].inference_graph(
            tree_data,
            data_spec,
            sparse_features=processed_sparse_features,
            **inference_args))
    with ops.device(self.variables.device_dummies[0].device):
      all_predict = array_ops.stack(probabilities)
      return math_ops.div(
          math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
          name='probabilities')