Python tensorflow.TensorShape() Examples

The following are 30 code examples of tensorflow.TensorShape(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
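Before diving into the project code, here is a minimal, self-contained sketch of the tf.TensorShape API itself (the shapes are illustrative, not taken from any project below):

import tensorflow as tf

s = tf.TensorShape([None, 28, 28, 3])    # None marks an unknown dimension
print(s.as_list())                       # [None, 28, 28, 3]
print(s.ndims)                           # 4
print(s.concatenate([10]).as_list())     # [None, 28, 28, 3, 10]
print(tf.TensorShape(None).ndims)        # None -- even the rank is unknown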
Example #1
Source File: 9_w2v_eager.py    From deep-learning-note with MIT License
def main():
    dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.int32),
                                             (tf.TensorShape([BATCH_SIZE]),
                                              tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP
                ))
                total_loss = 0.0
            num_train_steps += 1 
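The tf.TensorShape pair above declares the static per-element shapes that from_generator cannot infer on its own. A minimal stand-alone sketch of the same pattern, where gen and BATCH_SIZE are illustrative stand-ins for the project's own definitions:

import tensorflow as tf

BATCH_SIZE = 4  # assumption for illustration

def gen():
    for _ in range(3):
        yield [0] * BATCH_SIZE, [[1]] * BATCH_SIZE

dataset = tf.data.Dataset.from_generator(
    gen, (tf.int32, tf.int32),
    (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
for center, target in dataset:          # eager iteration (TF 2.x)
    print(center.shape, target.shape)   # (4,) (4, 1)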
Example #2
Source File: special.py    From zhusuan with MIT License
def __init__(self,
                 samples,
                 value_shape=None,
                 group_ndims=0,
                 **kwargs):
        self.samples = tf.convert_to_tensor(samples)

        self.explicit_value_shape = tf.TensorShape(value_shape)

        super(Implicit, self).__init__(
            dtype=samples.dtype,
            param_dtype=samples.dtype,
            is_continuous=samples.dtype.is_floating,
            is_reparameterized=False,
            use_path_derivative=False,
            group_ndims=group_ndims,
            **kwargs) 
Example #3
Source File: common_attention.py    From fine-lm with MIT License
def reshape_by_blocks(x, x_shape, memory_block_size):
  """Reshapes input by splitting its length over blocks of memory_block_size.

  Args:
    x: a Tensor with shape [batch, heads, length, depth]
    x_shape: tf.TensorShape of x.
    memory_block_size: Integer which divides length.

  Returns:
    Tensor with shape
    [batch, heads, length // memory_block_size, memory_block_size, depth].
  """
  x = tf.reshape(x, [
      x_shape[0], x_shape[1], x_shape[2] // memory_block_size,
      memory_block_size, x_shape[3]
  ])
  return x 
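A hedged usage sketch of reshape_by_blocks; the concrete sizes are illustrative assumptions, and tf.shape(x) stands in for the shape list the project computes elsewhere:

import tensorflow as tf

x = tf.zeros([2, 8, 64, 32])    # [batch, heads, length, depth]
blocked = reshape_by_blocks(x, tf.shape(x), memory_block_size=16)
print(blocked.shape)            # (2, 8, 4, 16, 32) when run eagerly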
Example #4
Source File: multivariate.py    From zhusuan with MIT License
def _sample(self, n_samples):
        if self.n_experiments is None:
            raise ValueError('Cannot sample when `n_experiments` is None')

        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(
            tf.random.categorical(logits_flat, n_samples * self.n_experiments))
        shape = tf.concat([[n_samples, self.n_experiments],
                           self.batch_shape], 0)
        samples = tf.reshape(samples_flat, shape)
        static_n_samples = n_samples if isinstance(n_samples,
                                                   int) else None
        static_n_exps = self.n_experiments \
            if isinstance(self.n_experiments, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples, static_n_exps]).
            concatenate(self.get_batch_shape()))
        samples = tf.reduce_sum(
            tf.one_hot(samples, self.n_categories, dtype=self.dtype),
            axis=1)
        return samples 
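The set_shape/concatenate combination above restores static shape information that dynamic ops such as tf.reshape discard. A minimal graph-mode sketch of just that mechanism (all shapes are illustrative):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # TF 1.x-style graph mode

x = tf.compat.v1.placeholder(tf.float32, shape=None)  # rank unknown
x.set_shape(tf.TensorShape([5]).concatenate(tf.TensorShape([None, 3])))
print(x.get_shape().as_list())          # [5, None, 3]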
Example #5
Source File: special.py    From zhusuan with MIT License
def __init__(self,
                 dtype,
                 batch_shape=None,
                 value_shape=None,
                 group_ndims=0,
                 is_continuous=None,
                 **kwargs):
        dtype = tf.float32 if dtype is None else tf.as_dtype(dtype).base_dtype

        self.explicit_batch_shape = tf.TensorShape(batch_shape)

        self.explicit_value_shape = tf.TensorShape(value_shape)

        if is_continuous is None:
            is_continuous = dtype.is_floating

        super(Empirical, self).__init__(
            dtype=dtype,
            param_dtype=None,
            is_continuous=is_continuous,
            is_reparameterized=False,
            use_path_derivative=False,
            group_ndims=group_ndims,
            **kwargs) 
Example #6
Source File: tf_nndistance.py    From pointnet-registration-framework with MIT License
def nn_distance(xyz1,xyz2):
	'''
Computes the distance of nearest neighbors for a pair of point clouds
input: xyz1: (batch_size,#points_1,3)  the first point cloud
input: xyz2: (batch_size,#points_2,3)  the second point cloud
output: dist1: (batch_size,#point_1)   distance from first to second
output: idx1:  (batch_size,#point_1)   nearest neighbor from first to second
output: dist2: (batch_size,#point_2)   distance from second to first
output: idx2:  (batch_size,#point_2)   nearest neighbor from second to first
	'''
	return nn_distance_module.nn_distance(xyz1,xyz2)
#@tf.RegisterShape('NnDistance')
#def _nn_distance_shape(op):
	#shape1=op.inputs[0].get_shape().with_rank(3)
	#shape2=op.inputs[1].get_shape().with_rank(3)
	#return [tf.TensorShape([shape1.dims[0],shape1.dims[1]]),tf.TensorShape([shape1.dims[0],shape1.dims[1]]),
		#tf.TensorShape([shape2.dims[0],shape2.dims[1]]),tf.TensorShape([shape2.dims[0],shape2.dims[1]])] 
Example #7
Source File: multivariate.py    From zhusuan with MIT License
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(
            tf.random.categorical(logits_flat, n_samples))
        if self.logits.get_shape().ndims == 2:
            samples = samples_flat
        else:
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            samples = tf.reshape(samples_flat, shape)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            samples.set_shape(
                tf.TensorShape([static_n_samples]).
                concatenate(self.get_batch_shape()))
        samples = tf.one_hot(samples, self.n_categories, dtype=self.dtype)
        return samples 
Example #8
Source File: losses.py    From deepchem with MIT License
def _make_shapes_consistent(output, labels):
  """Try to make inputs have the same shape by adding dimensions of size 1."""
  shape1 = output.shape
  shape2 = labels.shape
  len1 = len(shape1)
  len2 = len(shape2)
  if len1 == len2:
    return (output, labels)
  if isinstance(shape1, tf.TensorShape):
    shape1 = tuple(shape1.as_list())
  if isinstance(shape2, tf.TensorShape):
    shape2 = tuple(shape2.as_list())
  if len1 > len2 and all(i == 1 for i in shape1[len2:]):
    for i in range(len1 - len2):
      labels = tf.expand_dims(labels, -1)
    return (output, labels)
  if len2 > len1 and all(i == 1 for i in shape2[len1:]):
    for i in range(len2 - len1):
      output = tf.expand_dims(output, -1)
    return (output, labels)
  raise ValueError("Incompatible shapes for outputs and labels: %s versus %s" %
                   (str(shape1), str(shape2))) 
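An illustrative check of _make_shapes_consistent with assumed shapes: labels gains a trailing size-1 dimension so it can broadcast against output.

import tensorflow as tf

output = tf.zeros([4, 3, 1])
labels = tf.zeros([4, 3])
output, labels = _make_shapes_consistent(output, labels)
print(output.shape, labels.shape)   # (4, 3, 1) (4, 3, 1)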
Example #9
Source File: layers.py    From Counterfactual-StoryRW with MIT License
def _compute_concat_output_shape(input_shape, axis):
    """Infers the output shape of concat given the input shape.

    The code is adapted from the ConcatLayer of lasagne
    (https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/merge.py)

    Args:
        input_shape (list): A list of shapes, each of which is in turn a
            list or TensorShape.
        axis (int): Axis of the concat operation.

    Returns:
        list: Output shape of concat.
    """
    # The size of each axis of the output shape equals the first
    # input size of the respective axis that is not `None`
    input_shape = [tf.TensorShape(s).as_list() for s in input_shape]
    output_shape = [next((s for s in sizes if s is not None), None)
                    for sizes in zip(*input_shape)]
    axis_sizes = [s[axis] for s in input_shape]
    concat_axis_size = None if any(s is None for s in axis_sizes) \
            else sum(axis_sizes)
    output_shape[axis] = concat_axis_size
    return output_shape 
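A hedged example with assumed input shapes: unknown (None) sizes are filled in from the first input that knows them, and the concat axis is summed.

import tensorflow as tf

shapes = [tf.TensorShape([None, 16, 3]), tf.TensorShape([32, 16, 5])]
print(_compute_concat_output_shape(shapes, axis=-1))   # [32, 16, 8]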
Example #10
Source File: multivariate.py    From zhusuan with MIT License
def _sample(self, n_samples):
        logits, temperature = self.logits, self.temperature
        if not self.is_reparameterized:
            logits = tf.stop_gradient(logits)
            temperature = tf.stop_gradient(temperature)
        shape = tf.concat([[n_samples], tf.shape(self.logits)], 0)

        uniform = open_interval_standard_uniform(shape, self.dtype)
        # TODO: Add Gumbel distribution
        gumbel = -tf.log(-tf.log(uniform))
        samples = tf.nn.softmax((logits + gumbel) / temperature)

        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(logits.get_shape()))
        return samples 
Example #11
Source File: multivariate.py    From zhusuan with MIT License
def _sample(self, n_samples):
        mean, u_tril, v_tril = self.mean, self.u_tril, self.v_tril
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            u_tril = tf.stop_gradient(u_tril)
            v_tril = tf.stop_gradient(v_tril)

        def tile(t):
            new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
            return tf.tile(tf.expand_dims(t, 0), new_shape)

        batch_u_tril = tile(u_tril)
        batch_v_tril = tile(v_tril)
        noise = tf.random_normal(
            tf.concat([[n_samples], tf.shape(mean)], axis=0), dtype=self.dtype)
        samples = mean + \
            tf.matmul(tf.matmul(batch_u_tril, noise),
                      tf.matrix_transpose(batch_v_tril))
        # Update static shape
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(tf.TensorShape([static_n_samples])
                          .concatenate(self.get_batch_shape())
                          .concatenate(self.get_value_shape()))
        return samples 
Example #12
Source File: connectors.py    From Counterfactual-StoryRW with MIT License
def _assert_same_size(outputs, output_size):
    """Check if outputs match output_size

    Args:
        outputs: A Tensor or a (nested) tuple of tensors
        output_size: Can be an Integer, a TensorShape, or a (nested) tuple of
            Integers or TensorShape.
    """
    nest.assert_same_structure(outputs, output_size)
    flat_output_size = nest.flatten(output_size)
    flat_output = nest.flatten(outputs)

    for (output, size) in zip(flat_output, flat_output_size):
        if output[0].shape != tf.TensorShape(size):
            raise ValueError(
                "The output size does not match the the required output_size") 
Example #13
Source File: multivariate.py    From zhusuan with MIT License
def _get_value_shape(self):
        if isinstance(self.n_categories, int):
            return tf.TensorShape([self.n_categories])
        return tf.TensorShape([None]) 
Example #14
Source File: multivariate.py    From zhusuan with MIT License
def _sample(self, n_samples):
        logits, temperature = self.logits, self.temperature
        if not self.is_reparameterized:
            logits = tf.stop_gradient(logits)
            temperature = tf.stop_gradient(temperature)
        shape = tf.concat([[n_samples], tf.shape(self.logits)], 0)

        uniform = open_interval_standard_uniform(shape, self.dtype)
        gumbel = -tf.log(-tf.log(uniform))
        samples = tf.nn.log_softmax((logits + gumbel) / temperature)

        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(logits.get_shape()))
        return samples 
Example #15
Source File: multivariate.py    From zhusuan with MIT License
def _get_value_shape(self):
        if isinstance(self.n_categories, int):
            return tf.TensorShape([self.n_categories])
        return tf.TensorShape([None]) 
Example #16
Source File: multivariate.py    From zhusuan with MIT License
def _get_value_shape(self):
        shape_ = tf.TensorShape([
            self._n_row if isinstance(self._n_row, int) else None,
            self._n_col if isinstance(self._n_col, int) else None])
        return shape_ 
Example #17
Source File: multivariate.py    From zhusuan with MIT License
def _get_batch_shape(self):
        if self.logits.get_shape():
            return self.logits.get_shape()[:-1]
        return tf.TensorShape(None) 
Example #18
Source File: special.py    From zhusuan with MIT License
def _get_batch_shape(self):
        if (not self.samples.get_shape()) or (not self.explicit_value_shape):
            return tf.TensorShape(None)
        else:
            d = self.explicit_value_shape.ndims
            if d == 0:
                return self.samples.get_shape()
            else:
                return self.samples.get_shape()[:-d] 
Example #19
Source File: univariate.py    From zhusuan with MIT License
def _sample(self, n_samples):
        mean, std = self.mean, self.std
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            std = tf.stop_gradient(std)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        samples = tf.random_normal(shape, dtype=self.dtype) * std + mean
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples 
Example #20
Source File: encoder_on_batch.py    From ADEM with MIT License
def encoder_on_batch(batch_with_embedding, batch_mask,
                     encoder, scope_name=None):
    check_encoder_format(encoder)
    encoder_func = get_encoder(encoder)
    batch_size = tf.shape(batch_with_embedding)[0]

    if 'output_size' not in encoder['params']:
        output_dim = encoder['params']['context_level_state_size']
    else:
        output_dim = encoder['params']['output_size']

    idx = tf.constant(0)
    output = tf.zeros([1, output_dim], dtype=tf.float32)
    condition_func = lambda idx, output: idx < batch_size

    def body_func(idx, output):
        encoder['params']['input_with_embedding'] = batch_with_embedding[idx]
        encoder['params']['mask'] = batch_mask[idx]
        encoder['params']['scope_name'] = scope_name
        encoder_result = encoder_func(**encoder['params'])
        return [idx + 1, tf.concat([output, encoder_result], axis=0)]

    _, batch_output = tf.while_loop(
        condition_func, body_func,
        loop_vars=[idx, output],
        shape_invariants=[idx.get_shape(), tf.TensorShape([None, output_dim])])

    return tf.slice(batch_output, [1, 0], [batch_size, output_dim]) 
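The TensorShape-specific point here is the shape invariant: a loop variable whose leading dimension grows across iterations must be declared as tf.TensorShape([None, dim]), or tf.while_loop will reject the concat. A self-contained sketch of just that pattern (names and sizes are illustrative):

import tensorflow as tf

dim = 4
idx = tf.constant(0)
acc = tf.zeros([1, dim])

_, result = tf.while_loop(
    lambda i, acc: i < 3,
    lambda i, acc: (i + 1, tf.concat([acc, tf.ones([1, dim])], axis=0)),
    loop_vars=[idx, acc],
    shape_invariants=[idx.get_shape(), tf.TensorShape([None, dim])])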
Example #21
Source File: 10_w2v_graph.py    From deep-learning-note with MIT License
def main():
    dataset = tf.data.Dataset.from_generator(gen,
                                             (tf.int32, tf.int32),
                                             (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    word2vec(dataset) 
Example #22
Source File: tf_sampling.py    From deep-functional-dictionaries with MIT License
def gather_point(inp,idx):
    '''
input:
    batch_size * ndataset * 3   float32
    batch_size * npoints        int32
returns:
    batch_size * npoints * 3    float32
    '''
    return sampling_module.gather_point(inp,idx)
#@tf.RegisterShape('GatherPoint')
#def _gather_point_shape(op):
#    shape1=op.inputs[0].get_shape().with_rank(3)
#    shape2=op.inputs[1].get_shape().with_rank(2)
#    return [tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[2]])] 
Example #23
Source File: psroi_pooling_op_grad.py    From TFFRCNN with MIT License
def _psroi_pool_shape(op):
  """Shape function for the PSROIPool op.

  """
  dims_data = op.inputs[0].get_shape().as_list()
  channels = dims_data[3]
  dims_rois = op.inputs[1].get_shape().as_list()
  num_rois = dims_rois[0]
  output_dim = op.get_attr('output_dim')
  group_size = op.get_attr('group_size')
  pooled_height = group_size
  pooled_width = group_size

  output_shape = tf.TensorShape([num_rois, pooled_height, pooled_width, output_dim])
  return [output_shape, output_shape] 
Example #24
Source File: mle_losses_test.py    From Counterfactual-StoryRW with MIT License
def _test_sequence_loss(self, loss_fn, labels, logits, sequence_length):
        with self.test_session() as sess:
            loss = loss_fn(labels, logits, sequence_length)
            rank = sess.run(tf.rank(loss))
            self.assertEqual(rank, 0)

            loss = loss_fn(
                labels, logits, sequence_length, sum_over_timesteps=False)
            rank = sess.run(tf.rank(loss))
            self.assertEqual(rank, 1)
            self.assertEqual(loss.shape, tf.TensorShape([self._max_time]))

            loss = loss_fn(
                labels, logits, sequence_length, sum_over_timesteps=False,
                average_across_timesteps=True, average_across_batch=False)
            rank = sess.run(tf.rank(loss))
            self.assertEqual(rank, 1)
            self.assertEqual(loss.shape, tf.TensorShape([self._batch_size]))

            loss = loss_fn(
                labels, logits, sequence_length, sum_over_timesteps=False,
                average_across_batch=False)
            rank = sess.run(tf.rank(loss))
            self.assertEqual(rank, 2)
            self.assertEqual(loss.shape,
                             tf.TensorShape([self._batch_size, self._max_time]))

            sequence_length_time = tf.random_uniform(
                [self._max_time], maxval=self._max_time, dtype=tf.int32)
            loss = loss_fn(
                labels, logits, sequence_length_time, sum_over_timesteps=False,
                average_across_batch=False, time_major=True)
            self.assertEqual(loss.shape,
                             tf.TensorShape([self._batch_size, self._max_time])) 
Example #25
Source File: layers.py    From Counterfactual-StoryRW with MIT License
def compute_output_shape(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        for layer in self._layers:
            output_shape = layer.compute_output_shape(input_shape)
            input_shape = output_shape
        return output_shape 
Example #26
Source File: layers.py    From Counterfactual-StoryRW with MIT License
def compute_output_shape(self, input_shape):
        if self._layers is None:
            _shapes = input_shape
            if not isinstance(_shapes, (list, tuple)):
                _shapes = [_shapes]
        else:
            _shapes = []
            for layer in self._layers:
                layer_output_shape = layer.compute_output_shape(input_shape)
                _shapes.append(layer_output_shape)
        _shapes = [tf.TensorShape(s) for s in _shapes]

        if self._mode == 'concat':
            output_shape = _compute_concat_output_shape(_shapes, self._axis)
        elif self._mode in ['sum', 'mean', 'prod', 'max', 'min',
                            'and', 'or', 'logsumexp']:
            output_shape = _compute_concat_output_shape(_shapes, self._axis)
            output_shape.pop(self._axis)
        elif self._mode in ['elemwise_sum', 'elemwise_mul']:
            # Simply infer the output shape as the input shape of highest rank
            _ranks = [s.ndims for s in _shapes]
            max_rank = max(_ranks)
            max_ranked_shapes = []
            for i, s in enumerate(_shapes):
                if _ranks[i] == max_rank:
                    max_ranked_shapes.append(s.as_list())
            # Grab the first size of each axis that is not `None`
            output_shape = [next((s for s in sizes if s is not None), None)
                            for sizes in zip(*max_ranked_shapes)]
        else:
            raise ValueError("Unknown merge mode: '%s'" % self._mode)

        return tf.TensorShape(output_shape) 
Example #27
Source File: layers.py    From Counterfactual-StoryRW with MIT License
def compute_output_shape(self, input_shape):
        input_shape = tf.TensorShape(input_shape).as_list()
        if self._data_format == 'channels_last':
            return tf.TensorShape([input_shape[0], input_shape[2]])
        else:
            return tf.TensorShape([input_shape[0], input_shape[1]]) 
Example #28
Source File: mono_text_data.py    From Counterfactual-StoryRW with MIT License
def _make_padded_text_and_id_shapes(dataset, dataset_hparams, decoder,
                                        text_name, text_id_name):
        max_length = dataset_hparams['max_seq_length']
        if max_length is None:
            raise ValueError("hparams 'max_seq_length' must be specified "
                             "when 'pad_to_max_seq_length' is True.")
        max_length += decoder.added_length

        padded_shapes = dataset.output_shapes

        def _get_new_shape(name):
            dim = len(padded_shapes[name])
            if not dataset_hparams['variable_utterance']:
                if dim != 1:
                    raise ValueError(
                        "Unable to pad data '%s' to max seq length. Expected "
                        "1D Tensor, but got %dD Tensor." % (name, dim))
                return tf.TensorShape(max_length)
            else:
                if dim != 2:
                    raise ValueError(
                        "Unable to pad data '%s' to max seq length. Expected "
                        "2D Tensor, but got %dD Tensor." % (name, dim))
                return tf.TensorShape([padded_shapes[name][0], max_length])

        text_and_id_shapes = {}
        if text_name in padded_shapes:
            text_and_id_shapes[text_name] = _get_new_shape(text_name)
        if text_id_name in padded_shapes:
            text_and_id_shapes[text_id_name] = _get_new_shape(text_id_name)

        return text_and_id_shapes 
Example #29
Source File: transformer_decoders.py    From Counterfactual-StoryRW with MIT License
def output_size(self):
        """Output size of one step.
        """
        return TransformerDecoderOutput(
            logits=tf.TensorShape([self._vocab_size]),
            sample_id=self._helper.sample_ids_shape) 
Example #30
Source File: univariate.py    From zhusuan with MIT License
def _get_value_shape(self):
        return tf.TensorShape([])