Python tensorflow.TensorShape() Examples
The following are 30 code examples of tensorflow.TensorShape(), drawn from the open-source projects credited in each example.
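Before the examples, a minimal sketch of the tf.TensorShape API itself, shown only for orientation; every call below is standard TensorFlow.

import tensorflow as tf

# A fully defined shape: rank and every dimension known.
s = tf.TensorShape([32, 299, 384, 3])
print(s.ndims)      # 4
print(s.as_list())  # [32, 299, 384, 3]

# A partially defined shape: None marks an unknown dimension.
p = tf.TensorShape([None, 128])

# tf.TensorShape(None) has unknown rank; several examples below use it
# to opt out of static shape checking entirely.
u = tf.TensorShape(None)

# Shapes compose with concatenate, an idiom many examples below use to
# prepend a sample dimension to a batch shape.
print(tf.TensorShape([5]).concatenate(p).as_list())  # [5, None, 128]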

Example #1
Source File: 9_w2v_eager.py From deep-learning-note with MIT License
def main():
    dataset = tf.data.Dataset.from_generator(
        gen,
        (tf.int32, tf.int32),
        (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
Example #2
Source File: common_attention.py From fine-lm with MIT License
def reshape_by_blocks(x, x_shape, memory_block_size):
    """Reshapes input by splitting its length over blocks of memory_block_size.

    Args:
        x: a Tensor with shape [batch, heads, length, depth]
        x_shape: tf.TensorShape of x.
        memory_block_size: Integer which divides length.

    Returns:
        Tensor with shape
        [batch, heads, length // memory_block_size, memory_block_size, depth].
    """
    x = tf.reshape(x, [
        x_shape[0], x_shape[1], x_shape[2] // memory_block_size,
        memory_block_size, x_shape[3]
    ])
    return x
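A hypothetical sanity check of reshape_by_blocks (the tensor and block size here are made up, not from fine-lm): a length of 8 split into blocks of 4 yields two blocks per head.

x = tf.zeros([2, 4, 8, 16])  # [batch, heads, length, depth]
y = reshape_by_blocks(x, x.shape, memory_block_size=4)
print(y.shape)  # (2, 4, 2, 4, 16)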
Example #3
Source File: tf_nndistance.py From pointnet-registration-framework with MIT License
def nn_distance(xyz1, xyz2):
    '''
    Computes the distance of nearest neighbors for a pair of point clouds.
    input:  xyz1: (batch_size, #points_1, 3)  the first point cloud
    input:  xyz2: (batch_size, #points_2, 3)  the second point cloud
    output: dist1: (batch_size, #points_1)  distance from first to second
    output: idx1:  (batch_size, #points_1)  nearest neighbor from first to second
    output: dist2: (batch_size, #points_2)  distance from second to first
    output: idx2:  (batch_size, #points_2)  nearest neighbor from second to first
    '''
    return nn_distance_module.nn_distance(xyz1, xyz2)

#@tf.RegisterShape('NnDistance')
#def _nn_distance_shape(op):
#    shape1 = op.inputs[0].get_shape().with_rank(3)
#    shape2 = op.inputs[1].get_shape().with_rank(3)
#    return [tf.TensorShape([shape1.dims[0], shape1.dims[1]]),
#            tf.TensorShape([shape1.dims[0], shape1.dims[1]]),
#            tf.TensorShape([shape2.dims[0], shape2.dims[1]]),
#            tf.TensorShape([shape2.dims[0], shape2.dims[1]])]
Example #4
Source File: connectors.py From Counterfactual-StoryRW with MIT License
def _assert_same_size(outputs, output_size):
    """Check if outputs match output_size.

    Args:
        outputs: A Tensor or a (nested) tuple of tensors.
        output_size: Can be an Integer, a TensorShape, or a (nested) tuple
            of Integers or TensorShape.
    """
    nest.assert_same_structure(outputs, output_size)
    flat_output_size = nest.flatten(output_size)
    flat_output = nest.flatten(outputs)

    for (output, size) in zip(flat_output, flat_output_size):
        if output[0].shape != tf.TensorShape(size):
            raise ValueError(
                "The output size does not match the required output_size")
Example #5
Source File: layers.py From Counterfactual-StoryRW with MIT License
def _compute_concat_output_shape(input_shape, axis):
    """Infers the output shape of concat given the input shape.

    The code is adapted from the ConcatLayer of lasagne
    (https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/merge.py)

    Args:
        input_shape (list): A list of shapes, each of which is in turn a
            list or TensorShape.
        axis (int): Axis of the concat operation.

    Returns:
        list: Output shape of concat.
    """
    # The size of each axis of the output shape equals the first
    # input size of respective axis that is not `None`
    input_shape = [tf.TensorShape(s).as_list() for s in input_shape]
    output_shape = [next((s for s in sizes if s is not None), None)
                    for sizes in zip(*input_shape)]
    axis_sizes = [s[axis] for s in input_shape]
    concat_axis_size = None if any(s is None for s in axis_sizes) \
        else sum(axis_sizes)
    output_shape[axis] = concat_axis_size
    return output_shape
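A quick worked check of the inference above (inputs made up): per axis the first non-None size wins, and the concat axis is the sum of its sizes.

print(_compute_concat_output_shape([[None, 3, 5], [4, 3, 2]], axis=2))
# [4, 3, 7]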
Example #6
Source File: losses.py From deepchem with MIT License
def _make_shapes_consistent(output, labels):
    """Try to make inputs have the same shape by adding dimensions of size 1."""
    shape1 = output.shape
    shape2 = labels.shape
    len1 = len(shape1)
    len2 = len(shape2)
    if len1 == len2:
        return (output, labels)
    if isinstance(shape1, tf.TensorShape):
        shape1 = tuple(shape1.as_list())
    if isinstance(shape2, tf.TensorShape):
        shape2 = tuple(shape2.as_list())
    if len1 > len2 and all(i == 1 for i in shape1[len2:]):
        for i in range(len1 - len2):
            labels = tf.expand_dims(labels, -1)
        return (output, labels)
    if len2 > len1 and all(i == 1 for i in shape2[len1:]):
        for i in range(len2 - len1):
            output = tf.expand_dims(output, -1)
        return (output, labels)
    raise ValueError("Incompatible shapes for outputs and labels: %s versus %s" %
                     (str(shape1), str(shape2)))
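A hypothetical call illustrating the rule above: trailing size-1 dimensions on the longer shape are matched by expanding the shorter input.

output = tf.zeros([8, 4, 1])
labels = tf.zeros([8, 4])
output, labels = _make_shapes_consistent(output, labels)
print(labels.shape)  # (8, 4, 1)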
Example #7
Source File: special.py From zhusuan with MIT License
def __init__(self,
             dtype,
             batch_shape=None,
             value_shape=None,
             group_ndims=0,
             is_continuous=None,
             **kwargs):
    dtype = tf.float32 if dtype is None else tf.as_dtype(dtype).base_dtype
    self.explicit_batch_shape = tf.TensorShape(batch_shape)
    self.explicit_value_shape = tf.TensorShape(value_shape)
    if is_continuous is None:
        is_continuous = dtype.is_floating
    super(Empirical, self).__init__(
        dtype=dtype,
        param_dtype=None,
        is_continuous=is_continuous,
        is_reparameterized=False,
        use_path_derivative=False,
        group_ndims=group_ndims,
        **kwargs)
Example #8
Source File: special.py From zhusuan with MIT License
def __init__(self, samples, value_shape=None, group_ndims=0, **kwargs):
    self.samples = tf.convert_to_tensor(samples)
    self.explicit_value_shape = tf.TensorShape(value_shape)
    super(Implicit, self).__init__(
        dtype=samples.dtype,
        param_dtype=samples.dtype,
        is_continuous=samples.dtype.is_floating,
        is_reparameterized=False,
        use_path_derivative=False,
        group_ndims=group_ndims,
        **kwargs)
Example #9
Source File: multivariate.py From zhusuan with MIT License
def _sample(self, n_samples):
    if self.n_experiments is None:
        raise ValueError('Cannot sample when `n_experiments` is None')
    if self.logits.get_shape().ndims == 2:
        logits_flat = self.logits
    else:
        logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
    samples_flat = tf.transpose(
        tf.random.categorical(logits_flat, n_samples * self.n_experiments))
    shape = tf.concat([[n_samples, self.n_experiments], self.batch_shape], 0)
    samples = tf.reshape(samples_flat, shape)
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    static_n_exps = self.n_experiments \
        if isinstance(self.n_experiments, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples, static_n_exps]).
        concatenate(self.get_batch_shape()))
    samples = tf.reduce_sum(
        tf.one_hot(samples, self.n_categories, dtype=self.dtype),
        axis=1)
    return samples
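The set_shape call above is an idiom that recurs in Examples #10 through #12 below: tf.reshape with a dynamic shape tensor erases static shape information, and TensorShape.concatenate re-attaches what is statically known. A condensed, self-contained sketch of just that idiom (the function and names here are illustrative, not from zhusuan):

def draw(logits, n_samples):
    # logits: [batch, n_categories]; result: [n_samples, batch].
    flat = tf.transpose(tf.random.categorical(logits, n_samples))
    samples = tf.reshape(
        flat, tf.concat([[n_samples], tf.shape(logits)[:1]], 0))
    # The reshape used a dynamic shape, so restore the static part we know.
    static_n = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n]).concatenate(logits.get_shape()[:1]))
    return samples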
Example #10
Source File: multivariate.py From zhusuan with MIT License
def _sample(self, n_samples):
    if self.logits.get_shape().ndims == 2:
        logits_flat = self.logits
    else:
        logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
    samples_flat = tf.transpose(
        tf.random.categorical(logits_flat, n_samples))
    if self.logits.get_shape().ndims == 2:
        samples = samples_flat
    else:
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        samples = tf.reshape(samples_flat, shape)
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples]).
        concatenate(self.get_batch_shape()))
    samples = tf.one_hot(samples, self.n_categories, dtype=self.dtype)
    return samples
Example #11
Source File: multivariate.py From zhusuan with MIT License
def _sample(self, n_samples):
    logits, temperature = self.logits, self.temperature
    if not self.is_reparameterized:
        logits = tf.stop_gradient(logits)
        temperature = tf.stop_gradient(temperature)
    shape = tf.concat([[n_samples], tf.shape(self.logits)], 0)
    uniform = open_interval_standard_uniform(shape, self.dtype)
    # TODO: Add Gumbel distribution
    gumbel = -tf.log(-tf.log(uniform))
    samples = tf.nn.softmax((logits + gumbel) / temperature)
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples]).concatenate(logits.get_shape()))
    return samples
Example #12
Source File: multivariate.py From zhusuan with MIT License
def _sample(self, n_samples):
    mean, u_tril, v_tril = self.mean, self.u_tril, self.v_tril
    if not self.is_reparameterized:
        mean = tf.stop_gradient(mean)
        u_tril = tf.stop_gradient(u_tril)
        v_tril = tf.stop_gradient(v_tril)

    def tile(t):
        new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
        return tf.tile(tf.expand_dims(t, 0), new_shape)

    batch_u_tril = tile(u_tril)
    batch_v_tril = tile(v_tril)
    noise = tf.random_normal(
        tf.concat([[n_samples], tf.shape(mean)], axis=0), dtype=self.dtype)
    samples = mean + \
        tf.matmul(tf.matmul(batch_u_tril, noise),
                  tf.matrix_transpose(batch_v_tril))
    # Update static shape
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(tf.TensorShape([static_n_samples])
                      .concatenate(self.get_batch_shape())
                      .concatenate(self.get_value_shape()))
    return samples
Example #13
Source File: 10_w2v_graph.py From deep-learning-note with MIT License
def main():
    dataset = tf.data.Dataset.from_generator(
        gen,
        (tf.int32, tf.int32),
        (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    word2vec(dataset)
Example #14
Source File: 11_w2v_visual.py From deep-learning-note with MIT License
def main():
    dataset = tf.data.Dataset.from_generator(
        gen,
        (tf.int32, tf.int32),
        (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    model = SkipGramModel(dataset, VOCAB_SIZE, EMBED_SIZE, BATCH_SIZE,
                          NUM_SAMPLED, LEARNING_RATE)
    model.build_graph()
    model.train(NUM_TRAIN_STEPS)
    model.visualize(VISUAL_FLD, NUM_VISUALIZE)
Example #15
Source File: sequence_layers_test.py From DOTA_models with Apache License 2.0
def test_net_slice_char_logits_with_correct_shape(self):
    batch_size = 2
    seq_length = 4
    num_char_classes = 3
    layer = create_layer(sequence_layers.NetSlice, batch_size, seq_length,
                         num_char_classes)
    char_logits = layer.create_logits()
    self.assertEqual(
        tf.TensorShape([batch_size, seq_length, num_char_classes]),
        char_logits.get_shape())
Example #16
Source File: sequence_layers_test.py From DOTA_models with Apache License 2.0
def test_net_slice_with_autoregression_char_logits_with_correct_shape(self):
    batch_size = 2
    seq_length = 4
    num_char_classes = 3
    layer = create_layer(sequence_layers.NetSliceWithAutoregression,
                         batch_size, seq_length, num_char_classes)
    char_logits = layer.create_logits()
    self.assertEqual(
        tf.TensorShape([batch_size, seq_length, num_char_classes]),
        char_logits.get_shape())
Example #17
Source File: sequence_layers_test.py From DOTA_models with Apache License 2.0
def test_attention_char_logits_with_correct_shape(self):
    batch_size = 2
    seq_length = 4
    num_char_classes = 3
    layer = create_layer(sequence_layers.Attention, batch_size, seq_length,
                         num_char_classes)
    char_logits = layer.create_logits()
    self.assertEqual(
        tf.TensorShape([batch_size, seq_length, num_char_classes]),
        char_logits.get_shape())
Example #18
Source File: sequence_layers_test.py From DOTA_models with Apache License 2.0
def test_attention_with_autoregression_char_logits_with_correct_shape(self):
    batch_size = 2
    seq_length = 4
    num_char_classes = 3
    layer = create_layer(sequence_layers.AttentionWithAutoregression,
                         batch_size, seq_length, num_char_classes)
    char_logits = layer.create_logits()
    self.assertEqual(
        tf.TensorShape([batch_size, seq_length, num_char_classes]),
        char_logits.get_shape())
Example #19
Source File: reader_ops_test.py From DOTA_models with Apache License 2.0
def testParsingReaderOpWhileLoop(self):
    feature_size = 3
    batch_size = 5

    def ParserEndpoints():
        return gen_parser_ops.gold_parse_reader(self._task_context,
                                                feature_size,
                                                batch_size,
                                                corpus_name='training-corpus')

    with self.test_session() as sess:
        # The 'condition' and 'body' functions expect as many arguments as
        # there are loop variables. 'condition' depends on the 'epoch' loop
        # variable only, so we disregard the remaining unused function
        # arguments. 'body' returns a list of updated loop variables.
        def Condition(epoch, *unused_args):
            return tf.less(epoch, 2)

        def Body(epoch, num_actions, *feature_args):
            # By adding one of the outputs of the reader op ('epoch') as a
            # control dependency to the reader op we force the repeated
            # evaluation of the reader op.
            with epoch.graph.control_dependencies([epoch]):
                features, epoch, gold_actions = ParserEndpoints()
            num_actions = tf.maximum(
                num_actions, tf.reduce_max(gold_actions, [0], False) + 1)
            feature_ids = []
            for i in range(len(feature_args)):
                feature_ids.append(features[i])
            return [epoch, num_actions] + feature_ids

        epoch = ParserEndpoints()[-2]
        num_actions = tf.constant(0)
        loop_vars = [epoch, num_actions]
        res = sess.run(
            tf.while_loop(Condition, Body, loop_vars,
                          shape_invariants=[tf.TensorShape(None)] * 2,
                          parallel_iterations=1))
        logging.info('Result: %s', res)
        self.assertEqual(res[0], 2)
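The shape_invariants argument above uses tf.TensorShape(None), the unknown-rank shape, to disable static shape checking for both loop variables. A self-contained sketch of the same mechanism (variables and bounds are made up), where one loop variable genuinely changes shape on every iteration:

def cond(i, acc):
    return i < 3

def body(i, acc):
    # acc doubles in length each iteration, so its static shape cannot be fixed.
    return [i + 1, tf.concat([acc, acc], axis=0)]

i0 = tf.constant(0)
acc0 = tf.ones([1])
_, result = tf.while_loop(
    cond, body, [i0, acc0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None])])
# result has shape [8] after three doublings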
Example #20
Source File: ops.py From DOTA_models with Apache License 2.0
def _two_element_tuple(int_or_tuple):
    """Converts `int_or_tuple` to height, width.

    Several of the functions that follow accept arguments as either a tuple of
    2 integers or a single integer. A single integer indicates that the 2
    values of the tuple are the same.

    This function normalizes the input value by always returning a tuple.

    Args:
        int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.

    Returns:
        A tuple with 2 values.

    Raises:
        ValueError: If `int_or_tuple` is not well formed.
    """
    if isinstance(int_or_tuple, (list, tuple)):
        if len(int_or_tuple) != 2:
            raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
        return int(int_or_tuple[0]), int(int_or_tuple[1])
    if isinstance(int_or_tuple, int):
        return int(int_or_tuple), int(int_or_tuple)
    if isinstance(int_or_tuple, tf.TensorShape):
        if len(int_or_tuple) == 2:
            return int_or_tuple[0], int_or_tuple[1]
    raise ValueError('Must be an int, a list with 2 elements or a TensorShape '
                     'of length 2')
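The normalization above in action (illustrative calls, not from the project): all three input forms reduce to the same (height, width) tuple.

print(_two_element_tuple(3))                       # (3, 3)
print(_two_element_tuple([3, 5]))                  # (3, 5)
print(_two_element_tuple(tf.TensorShape([3, 5])))  # (3, 5)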
Example #21
Source File: static_shape_test.py From DOTA_models with Apache License 2.0
def test_return_correct_batchSize(self):
    tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
    self.assertEqual(32, static_shape.get_batch_size(tensor_shape))
Example #22
Source File: static_shape_test.py From DOTA_models with Apache License 2.0
def test_return_correct_height(self):
    tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
    self.assertEqual(299, static_shape.get_height(tensor_shape))
Example #23
Source File: static_shape_test.py From DOTA_models with Apache License 2.0
def test_return_correct_width(self):
    tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
    self.assertEqual(384, static_shape.get_width(tensor_shape))
Example #24
Source File: static_shape_test.py From DOTA_models with Apache License 2.0
def test_return_correct_depth(self):
    tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
    self.assertEqual(3, static_shape.get_depth(tensor_shape))
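Together, the four tests above pin down the contract of the static_shape helpers: each reads one dimension of a rank-4 NHWC TensorShape. A plausible reconstruction of one helper, consistent with what the tests assert but not necessarily the actual DOTA_models source (TF 1.x Dimension API):

def get_batch_size(tensor_shape):
    # tensor_shape: a rank-4 tf.TensorShape in [batch, height, width, depth]
    # order; height, width, and depth would index 1, 2, and 3 respectively.
    tensor_shape.assert_has_rank(rank=4)
    return tensor_shape[0].value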
Example #25
Source File: networks.py From soccer-matlab with BSD 2-Clause "Simplified" License
def output_size(self):
    return (self._action_size, self._action_size, tf.TensorShape([]))
Example #26
Source File: networks.py From soccer-matlab with BSD 2-Clause "Simplified" License
def output_size(self):
    return (self._action_size, self._action_size, tf.TensorShape([]))
Example #27
Source File: networks.py From soccer-matlab with BSD 2-Clause "Simplified" License
def output_size(self):
    return (self._action_size, self._action_size, tf.TensorShape([]))
Example #28
Source File: inception_score.py From ArtGAN with BSD 3-Clause "New" or "Revised" License
def _init_inception():
    global softmax
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        # The batch dimension is made dynamic.
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                o._shape = tf.TensorShape(new_shape)
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        logits = tf.matmul(tf.squeeze(pool3), w)
        softmax = tf.nn.softmax(logits)
Example #29
Source File: beam_search.py From fine-lm with MIT License
def get_state_shape_invariants(tensor):
    """Returns the shape of the tensor but sets middle dims to None."""
    shape = tensor.shape.as_list()
    for i in range(1, len(shape) - 1):
        shape[i] = None
    return tf.TensorShape(shape)
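A quick check of the invariant builder above (the tensor is made up): outer dimensions survive while middle ones are freed, which is what a beam-search state whose middle (time) dimension grows needs as a tf.while_loop shape invariant.

state = tf.zeros([4, 7, 16])
print(get_state_shape_invariants(state))  # (4, None, 16)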
Example #30
Source File: checkpoint_compatibility_test.py From fine-lm with MIT License
def input_fn(self):
    types = {"inputs": tf.int32}
    shapes = {"inputs": tf.TensorShape([None])}
    dataset = tf.data.Dataset.from_generator(self.input_generator, types,
                                             shapes)
    dataset = dataset.padded_batch(self.BATCH_SIZE, shapes)
    dataset = dataset.map(problem.standardize_shapes)
    features = dataset.make_one_shot_iterator().get_next()
    return features
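A self-contained variant of the pattern in this last example (the generator and data are made up), showing how the same tf.TensorShape([None]) both declares the generator's variable-length output and drives padded_batch:

def gen():
    for seq in ([1, 2], [3, 4, 5], [6]):
        yield {"inputs": seq}

types = {"inputs": tf.int32}
shapes = {"inputs": tf.TensorShape([None])}
dataset = tf.data.Dataset.from_generator(gen, types, shapes)
dataset = dataset.padded_batch(2, shapes)
# Each batch pads "inputs" to the longest sequence in that batch.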