Python tensorflow.compat.v1.int32() Examples
The following are 30 code examples of tensorflow.compat.v1.int32().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
Example #1
Source File: metrics_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testMultilabelMatch3(self):
  """Checks metrics.multilabel_accuracy_match3 against a NumPy reference."""
  predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
  targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
  weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
  # Zero out the targets of zero-weight examples.
  targets *= weights
  # Repeat the single prediction across the 10 target slots and count matches.
  predictions_repeat = np.repeat(predictions, 10, axis=1)
  expected = (predictions_repeat == targets).astype(float)
  expected = np.sum(expected, axis=(1, 2, 3))
  # Cap per-example credit at 3 matches (hence "match3").
  expected = np.minimum(expected / 3.0, 1.)
  # Weighted mean over the batch.
  expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
  with self.test_session() as session:
    scores, weights_ = metrics.multilabel_accuracy_match3(
        tf.one_hot(predictions, depth=5, dtype=tf.float32),
        tf.constant(targets, dtype=tf.int32))
    a, a_op = tf.metrics.mean(scores, weights_)
    # tf.metrics.mean creates local variables, so both initializers run.
    session.run(tf.local_variables_initializer())
    session.run(tf.global_variables_initializer())
    _ = session.run(a_op)
    actual = session.run(a)
    self.assertAlmostEqual(actual, expected, places=6)
Example #2
Source File: lstm_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testLSTMSeq2seqBidirectionalEncoder(self):
  """Smoke-tests LSTMSeq2seqBidirectionalEncoder and checks logits shape."""
  vocab_size = 9
  x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
  y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
  hparams = lstm.lstm_seq2seq()
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(x, dtype=tf.int32),
        "targets": tf.constant(y, dtype=tf.int32),
    }
    model = lstm.LSTMSeq2seqBidirectionalEncoder(
        hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  # Logits layout: (batch, target_length, 1, 1, vocab).
  self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))
Example #3
Source File: utils.py From lamb with Apache License 2.0 | 6 votes |
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Convert a vector of lengths to a matrix of binary masks.

  E.g. [2, 4, 3] will become [[1, 1, 0, 0],
                              [1, 1, 1, 1],
                              [1, 1, 1, 0]]

  Args:
    lengths: a d-element vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional
      tensor indicating the maximum length of the masks. If not provided,
      the maximum length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the
      dtype of the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks in `dtype` (defaulting to the
    dtype of `lengths`, not necessarily int32). Gradients are stopped.
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    # Position is valid (mask 1) exactly when its column index < length.
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
    return tf.stop_gradient(cast_mask)
Example #4
Source File: model_tf1.py From machine-learning-for-programming-samples with MIT License | 6 votes |
def build(self, input_shape):
    """Builds the TF1 graph: placeholders, logits, loss, metrics, train step.

    All created tensors/ops are stashed in self._placeholders / self._ops,
    and global variables are initialized in the owned session.
    """
    with self._sess.graph.as_default():
        # [B, T] int32 token IDs; used both as model input and as targets.
        self._placeholders["tokens"] = tf.placeholder(
            dtype=tf.int32, shape=[None, None], name="tokens"
        )
        self._ops["output_logits"] = self.compute_logits(
            self._placeholders["tokens"]
        )
        self._ops["output_probs"] = tf.nn.softmax(self._ops["output_logits"], -1)
        # Language-model objective: predict the same token sequence.
        result = self.compute_loss_and_acc(
            rnn_output_logits=self._ops["output_logits"],
            target_token_seq=self._placeholders["tokens"],
        )
        self._ops["loss"] = result.token_ce_loss
        self._ops["num_tokens"] = result.num_predictions
        self._ops["num_correct_tokens"] = result.num_correct_token_predictions
        self._ops["train_step"] = self._make_training_step(self._ops["loss"])
        init_op = tf.variables_initializer(
            self._sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        )
        self._sess.run(init_op)
Example #5
Source File: lstm_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testLSTMSeq2SeqAttention(self):
  """Smoke-tests LSTMSeq2seqAttention with a dynamically-shaped input."""
  vocab_size = 9
  x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
  y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
  hparams = lstm.lstm_attention()
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  x = tf.constant(x, dtype=tf.int32)
  # Hide the static batch/length so attention must cope with unknown shapes.
  x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])
  with self.test_session() as session:
    features = {
        "inputs": x,
        "targets": tf.constant(y, dtype=tf.int32),
    }
    model = lstm.LSTMSeq2seqAttention(
        hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))
Example #6
Source File: lstm_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testLSTMSeq2seqAttentionBidirectionalEncoder(self):
  """Smoke-tests LSTMSeq2seqAttentionBidirectionalEncoder logits shape."""
  vocab_size = 9
  x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
  y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
  hparams = lstm.lstm_attention()
  # Pass hparams as every sibling LSTM test does; the original omitted it,
  # leaving the problem hparams unconfigured relative to the model hparams.
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  x = tf.constant(x, dtype=tf.int32)
  # Hide the static batch/length so attention must cope with unknown shapes.
  x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])
  with self.test_session() as session:
    features = {
        "inputs": x,
        "targets": tf.constant(y, dtype=tf.int32),
    }
    model = lstm.LSTMSeq2seqAttentionBidirectionalEncoder(
        hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))
Example #7
Source File: resnet_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _test_resnet(self, img_size, output_size):
  """Builds a tiny ResNet on random image data and checks the logits shape.

  Args:
    img_size: height/width of the random input images.
    output_size: expected spatial-dims tuple of the resulting logits.
  """
  vocab_size = 9
  batch_size = 2
  x = np.random.randint(
      256, size=(batch_size, img_size, img_size, 3))
  y = np.random.randint(
      1, high=vocab_size, size=(batch_size, 1, 1, 1))
  hparams = resnet_tiny_cpu()
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  # Treat inputs as images and targets as class labels.
  p_hparams.modality["inputs"] = modalities.ModalityType.IMAGE
  p_hparams.modality["targets"] = modalities.ModalityType.CLASS_LABEL
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(x, dtype=tf.int32),
        "targets": tf.constant(y, dtype=tf.int32),
    }
    model = resnet.Resnet(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  self.assertEqual(res.shape, (batch_size,) + output_size + (1, vocab_size))
Example #8
Source File: multistep_with_adamoptimizer.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _create_slots(self, var_list):
  """Create slot variables for Adam with accumulated gradients."""
  # Colocate the non-slot accumulators with the lexicographically first
  # variable so they land on a deterministic device.
  first_var = min(var_list, key=lambda x: x.name)
  self._create_non_slot_variable(
      initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
  self._create_non_slot_variable(
      initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
  # if iter is initialized as an int32, this optimizer could not run
  # with tensorflow_hub with a tensorflow-gpu version
  self._create_non_slot_variable(
      initial_value=0.0 if self._n == 1 else 1.0,
      name="iter",
      colocate_with=first_var)
  # Create slots for the first and second moments, as well as grad_acc.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "grad_acc", self._name)
Example #9
Source File: xception_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _test_xception(self, img_size):
  """Builds tiny Xception on random image data and checks the logits shape.

  Args:
    img_size: height/width of the random input images.
  """
  vocab_size = 9
  batch_size = 3
  x = np.random.randint(
      256, size=(batch_size, img_size, img_size, 3))
  y = np.random.randint(
      1, high=vocab_size, size=(batch_size, 1, 1, 1))
  hparams = xception.xception_tiny()
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  # Treat inputs as images and targets as class labels.
  p_hparams.modality["inputs"] = modalities.ModalityType.IMAGE
  p_hparams.modality["targets"] = modalities.ModalityType.CLASS_LABEL
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(x, dtype=tf.int32),
        "targets": tf.constant(y, dtype=tf.int32),
    }
    model = xception.Xception(hparams, tf.estimator.ModeKeys.TRAIN,
                              p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  self.assertEqual(res.shape, (batch_size, 1, 1, 1, vocab_size))
Example #10
Source File: model.py From benchmarks with Apache License 2.0 | 6 votes |
def get_synthetic_inputs(self, input_name, nclass):
  """Returns synthetic image and label tensors for benchmarking.

  Args:
    input_name: name for the created inputs variable.
    nclass: number of label classes.

  Returns:
    A (inputs, labels) tuple of tensors.
  """
  # Synthetic input should be within [0, 255].
  image_shape, label_shape = self.get_input_shapes('train')
  inputs = tf.truncated_normal(
      image_shape,
      dtype=self.data_type,
      mean=127,
      stddev=60,
      name=self.model_name + '_synthetic_inputs')
  # Stored as a local, non-trainable variable so the same batch is reused.
  inputs = variables_module.VariableV1(
      inputs, trainable=False,
      collections=[tf.GraphKeys.LOCAL_VARIABLES],
      name=input_name)
  # NOTE(review): random_uniform's maxval is exclusive, so labels are drawn
  # from [0, nclass - 2] — class nclass - 1 never appears; confirm intended.
  labels = tf.random_uniform(
      label_shape,
      minval=0,
      maxval=nclass - 1,
      dtype=tf.int32,
      name=self.model_name + '_synthetic_labels')
  return (inputs, labels)
Example #11
Source File: slicenet_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testSliceNet(self):
  """Runs SliceNet on random CIFAR-10-shaped data; checks logits shape."""
  x = np.random.randint(256, size=(3, 5, 5, 3))
  y = np.random.randint(10, size=(3, 5, 1, 1))
  hparams = slicenet.slicenet_params1_tiny()
  hparams.add_hparam("data_dir", "")
  problem = registry.problem("image_cifar10")
  p_hparams = problem.get_hparams(hparams)
  hparams.problem_hparams = p_hparams
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(x, dtype=tf.int32),
        "targets": tf.constant(y, dtype=tf.int32),
        "target_space_id": tf.constant(1, dtype=tf.int32),
    }
    model = slicenet.SliceNet(hparams, tf.estimator.ModeKeys.TRAIN,
                              p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  # Classification problem: one logit vector of 10 classes per example.
  self.assertEqual(res.shape, (3, 1, 1, 1, 10))
Example #12
Source File: slicenet_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testSliceNetImageToText(self):
  """Runs SliceNet on an image-to-text problem; checks logits shape."""
  x = np.random.randint(256, size=(3, 5, 5, 3))
  y = np.random.randint(10, size=(3, 5, 1, 1))
  hparams = slicenet.slicenet_params1_tiny()
  hparams.add_hparam("data_dir", "")
  problem = registry.problem("image_ms_coco_characters")
  p_hparams = problem.get_hparams(hparams)
  hparams.problem_hparams = p_hparams
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(x, dtype=tf.int32),
        "targets": tf.constant(y, dtype=tf.int32),
        "target_space_id": tf.constant(1, dtype=tf.int32),
    }
    model = slicenet.SliceNet(hparams, tf.estimator.ModeKeys.TRAIN,
                              p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  # NOTE(review): 258 presumably is the character vocab size of the
  # ms_coco_characters problem — confirm against its problem definition.
  self.assertEqual(res.shape, (3, 5, 1, 1, 258))
Example #13
Source File: official_ncf_model.py From benchmarks with Apache License 2.0 | 6 votes |
def get_synthetic_inputs(self, input_name, nclass):
  """Returns the ops to generate synthetic inputs and labels."""
  def users_init_val():
    return tf.random_uniform((self.batch_size, 1), minval=0,
                             maxval=_NUM_USERS_20M, dtype=tf.int32)
  # Local, non-trainable variables so every step reuses the same batch.
  users = tf.Variable(users_init_val,
                      dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_users')
  def items_init_val():
    return tf.random_uniform((self.batch_size, 1), minval=0,
                             maxval=_NUM_ITEMS_20M, dtype=tf.int32)
  items = tf.Variable(items_init_val,
                      dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_items')
  def labels_init_val():
    # Binary click/no-click labels in {0, 1}.
    return tf.random_uniform((self.batch_size,), minval=0, maxval=2,
                             dtype=tf.int32)
  labels = tf.Variable(labels_init_val,
                       dtype=tf.int32, trainable=False,
                       collections=[tf.GraphKeys.LOCAL_VARIABLES],
                       name='synthetic_labels')
  return [users, items, labels]
Example #14
Source File: model_tf1.py From machine-learning-for-programming-samples with MIT License | 6 votes |
def compute_logits(self, token_ids: tf.Tensor) -> tf.Tensor: """ Implements a language model, where each output is conditional on the current input and inputs processed so far. Args: token_ids: int32 tensor of shape [B, T], storing integer IDs of tokens. Returns: tf.float32 tensor of shape [B, T, V], storing the distribution over output symbols for each timestep for each batch element. """ # TODO 5# 1) Embed tokens # TODO 5# 2) Run RNN on embedded tokens # TODO 5# 3) Project RNN outputs onto the vocabulary to obtain logits. return rnn_output_logits
Example #15
Source File: rl_utils.py From tensor2tensor with Apache License 2.0 | 6 votes |
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  """Builds the policy graph in its own tf.Graph and restores a checkpoint.

  Args:
    batch_size: number of environments acted on in parallel.
    observation_space: gym-style space describing observations.
    action_space: gym-style space describing actions.
    policy_hparams: hparams for rl.get_policy.
    policy_dir: directory to restore the policy checkpoint from.
    sampling_temp: softmax temperature used when sampling actions.
  """
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  # Isolate the policy in a private graph; the session below belongs to it.
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    # Restore only the policy-network variables.
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)
Example #16
Source File: gene_expression_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _test_model(self, hparams, model_cls):
  """Builds model_cls on gene-expression-shaped data; checks logits shape.

  Args:
    hparams: model hparams (with problem_hparams already attached).
    model_cls: T2T model class to instantiate.
  """
  batch_size = 3
  target_length = 6
  target_out = 10  # GeneExpressionProblem.num_output_predictions
  input_length = target_length * 128 // 4  # chunk_size=4
  input_vocab_size = 5
  inputs = np.random.randint(
      1, input_vocab_size + 1, size=(batch_size, input_length, 1, 1))
  # Regression targets: real-valued, hence float32 below.
  targets = np.random.random_sample((batch_size, target_length, 1,
                                     target_out))
  features = {
      "inputs": tf.constant(inputs, dtype=tf.int32),
      "targets": tf.constant(targets, dtype=tf.float32),
  }
  p_hparams = hparams.problem_hparams
  logits, _ = model_cls(
      hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)(features)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    res = sess.run(logits)
  self.assertEqual(res.shape, (batch_size, target_length, 1, target_out))
Example #17
Source File: simulated_batch_gym_env.py From tensor2tensor with Apache License 2.0 | 6 votes |
def __init__(self, *args, **kwargs):
  """Builds the simulated batch env graph plus its step/reset placeholders.

  All args/kwargs are forwarded to SimulatedBatchEnv.
  """
  with tf.Graph().as_default():
    self._batch_env = SimulatedBatchEnv(*args, **kwargs)
    # One action per environment in the batch.
    self._actions_t = tf.placeholder(shape=(self.batch_size,),
                                     dtype=tf.int32)
    self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t)
    # Read observations only after the simulate step has produced rewards.
    with tf.control_dependencies([self._rewards_t]):
      self._obs_t = self._batch_env.observ
    self._indices_t = tf.placeholder(shape=(self.batch_size,),
                                     dtype=tf.int32)
    # Reset resets every environment in the batch.
    self._reset_op = self._batch_env.reset(
        tf.range(self.batch_size, dtype=tf.int32)
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    self._batch_env.initialize(self._sess)
Example #18
Source File: image_transformer_2d_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _test_imagetransformer_2d(self, net):
  """Runs a 2d image transformer net on random data; checks logits shape.

  Args:
    net: T2T model class to instantiate.
  """
  batch_size = 3
  size = 7
  vocab_size = 256
  hparams = image_transformer_2d.imagetransformer2d_tiny()
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  inputs = np.random.randint(
      vocab_size, size=(batch_size, 1, 1, 1))
  targets = np.random.randint(
      vocab_size, size=(batch_size, size, size, 3))
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(inputs, dtype=tf.int32),
        "targets": tf.constant(targets, dtype=tf.int32),
        "target_space_id": tf.constant(1, dtype=tf.int32),
    }
    model = net(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  # One vocab-sized logit vector per target pixel channel.
  self.assertEqual(res.shape, (batch_size, size, size, 3, vocab_size))
Example #19
Source File: image_transformer_2d_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def _test_img2img_transformer(self, net):
  """Runs an img2img transformer on celeba-shaped data; checks logits shape.

  Args:
    net: T2T model class to instantiate.
  """
  batch_size = 3
  hparams = image_transformer_2d.img2img_transformer2d_tiny()
  hparams.data_dir = ""
  p_hparams = registry.problem("image_celeba").get_hparams(hparams)
  # 4x4 input images upsampled to 8x8 targets.
  inputs = np.random.randint(256, size=(batch_size, 4, 4, 3))
  targets = np.random.randint(256, size=(batch_size, 8, 8, 3))
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(inputs, dtype=tf.int32),
        "targets": tf.constant(targets, dtype=tf.int32),
        "target_space_id": tf.constant(1, dtype=tf.int32),
    }
    model = net(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  self.assertEqual(res.shape, (batch_size, 8, 8, 3, 256))
Example #20
Source File: bytenet_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testByteNet(self):
  """Smoke-tests ByteNet and checks the logits shape."""
  vocab_size = 9
  x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
  y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
  hparams = bytenet.bytenet_base()
  p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                   vocab_size,
                                                   hparams)
  with self.test_session() as session:
    features = {
        "inputs": tf.constant(x, dtype=tf.int32),
        "targets": tf.constant(y, dtype=tf.int32),
    }
    model = bytenet.ByteNet(
        hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
    logits, _ = model(features)
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  # NOTE(review): the target length grows from 6 to 50 — presumably ByteNet's
  # internal padding/expansion; confirm against bytenet.py.
  self.assertEqual(res.shape, (3, 50, 1, 1, vocab_size))
Example #21
Source File: shuffle_network.py From tensor2tensor with Apache License 2.0 | 6 votes |
def shuffle_layer(inputs, shuffle_fn=rol):
  """Shuffles the elements according to bitwise left or right rotation.

  Args:
    inputs: Tensor input from previous layer
    shuffle_fn: Shift function rol or ror

  Returns:
    tf.Tensor: Inputs shifted according to shuffle_fn
  """
  length = tf.shape(inputs)[1]
  # Bits needed to index a position: ceil(log2(length)), computed via
  # floor(log2(length - 1)) + 1.
  n_bits = tf.log(tf.cast(length - 1, tf.float32)) / tf.log(2.0)
  n_bits = tf.cast(n_bits, tf.int32) + 1
  # Permute positions by rotating their bit representation.
  indices = tf.range(0, length)
  rev_indices = shuffle_fn(indices, n_bits)
  return tf.gather(inputs, rev_indices, axis=1)
Example #22
Source File: metrics_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testAccuracyTopKMetric(self):
  """padded_accuracy_topk: k=1 equals exact accuracy, k=7 covers all 5 classes."""
  predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
  targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
  expected = np.mean((predictions == targets).astype(float))
  with self.test_session() as session:
    predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
    scores1, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=1)
    # k=7 > depth=5, so every target is within the top k.
    scores2, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=7)
    a1 = tf.reduce_mean(scores1)
    a2 = tf.reduce_mean(scores2)
    session.run(tf.global_variables_initializer())
    actual1, actual2 = session.run([a1, a2])
  self.assertAlmostEqual(actual1, expected)
  self.assertAlmostEqual(actual2, 1.0)
Example #23
Source File: data_reader.py From tensor2tensor with Apache License 2.0 | 6 votes |
def pad_batch(features, batch_multiple):
  """Pad batch dim of features to nearest multiple of batch_multiple."""
  # Infer the batch size from an arbitrary feature; all features are assumed
  # to share the same leading (batch) dimension.
  feature = list(features.items())[0][1]
  batch_size = tf.shape(feature)[0]
  mod = batch_size % batch_multiple
  # has_mod is 1 iff the batch is not already an exact multiple, so the
  # padding below is 0 in the already-aligned case.
  has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
  batch_padding = batch_multiple * has_mod - mod

  padded_features = {}
  for k, feature in features.items():
    rank = len(feature.shape)
    # Pad only at the end of the batch dimension.
    paddings = [[0, 0] for _ in range(rank)]
    paddings[0][1] = batch_padding
    padded_feature = tf.pad(feature, paddings)
    padded_features[k] = padded_feature
  return padded_features


# TODO(lukaszkaiser): refactor the API to not be just a list of self params
# but make sense for other uses too.
Example #24
Source File: autoencoders_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def get_mnist_random_output(self, model_name, hparams_set=None,
                            mode=tf.estimator.ModeKeys.TRAIN):
  """Builds model_name on random MNIST-shaped data and returns its logits.

  Args:
    model_name: registered T2T model name.
    hparams_set: hparams set name; defaults to model_name.
    mode: estimator mode to build the model in.

  Returns:
    The evaluated logits as a NumPy array.
  """
  hparams_set = hparams_set or model_name
  x = np.random.randint(256, size=(1, 28, 28, 1))
  y = np.random.randint(10, size=(1, 1))
  # Note the swap: "image_mnist_rev" predicts images from labels, so the
  # image is the target and the label is the input.
  features = {
      "targets": tf.constant(x, dtype=tf.int32),
      "inputs": tf.constant(y, dtype=tf.int32),
  }
  hparams = trainer_lib.create_hparams(
      hparams_set, problem_name="image_mnist_rev", data_dir=".")
  model = registry.model(model_name)(hparams, mode)
  tf.train.create_global_step()
  logits, _ = model(features)
  with self.test_session() as session:
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  return res
Example #25
Source File: residual_shuffle_exchange.py From tensor2tensor with Apache License 2.0 | 6 votes |
def residual_shuffle_network(inputs, hparams):
  """Residual Shuffle-Exchange network with weight sharing.

  Args:
    inputs: inputs to the Shuffle-Exchange network. Should be in length of
      power of 2.
    hparams: Model configuration

  Returns:
    tf.Tensor: Outputs of the Shuffle-Exchange last layer
  """
  input_shape = tf.shape(inputs)
  # n_bits = ceil(log2(sequence_length)), via floor(log2(len - 1)) + 1.
  n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0)
  n_bits = tf.cast(n_bits, tf.int32) + 1

  block_out = inputs

  # Each Benes block shares weights between its forward and reverse parts
  # via AUTO_REUSE within the same variable scope.
  for k in range(hparams.num_hidden_layers):
    with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE):
      forward_output = forward_part(block_out, hparams, n_bits)
      block_out = reverse_part(forward_output, hparams, n_bits)

  return RSU("last_layer", hparams.dropout, hparams.mode)(block_out)
Example #26
Source File: metrics_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testSequenceEditDistanceMetric(self): predictions = np.array([[3, 4, 5, 1, 0, 0], [2, 1, 3, 4, 0, 0], [2, 1, 3, 4, 0, 0]]) # Targets are just a bit different: # - first sequence has a different prediction # - second sequence has a different prediction and one extra step # - third sequence is identical targets = np.array([[5, 4, 5, 1, 0, 0], [2, 5, 3, 4, 1, 0], [2, 1, 3, 4, 0, 0]]) # Reshape to match expected input format by metric fns. predictions = np.reshape(predictions, [3, 6, 1, 1]) targets = np.reshape(targets, [3, 6, 1, 1]) with self.test_session() as session: scores, weight = metrics.sequence_edit_distance( tf.one_hot(predictions, depth=6, dtype=tf.float32), tf.constant(targets, dtype=tf.int32)) session.run(tf.global_variables_initializer()) actual_scores, actual_weight = session.run([scores, weight]) self.assertAlmostEqual(actual_scores, 3.0 / 13) self.assertEqual(actual_weight, 13)
Example #27
Source File: rouge_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testRougeLMetricE2E(self):
  """End-to-end smoke test: rouge_l_fscore runs on random one-hot data."""
  vocab_size = 4
  batch_size = 12
  seq_length = 12
  predictions = tf.one_hot(
      np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
      depth=4,
      dtype=tf.float32)
  targets = np.random.randint(4, size=(12, 12, 1, 1))
  with self.test_session() as session:
    scores, _ = rouge.rouge_l_fscore(
        predictions,
        tf.constant(targets, dtype=tf.int32))
    a = tf.reduce_mean(scores)
    session.run(tf.global_variables_initializer())
    # Only checks that evaluation succeeds; no expected value asserted.
    session.run(a)
Example #28
Source File: metrics_test.py From tensor2tensor with Apache License 2.0 | 6 votes |
def testNegativeLogPerplexityMaskedAssert(self):
  """Verifies a ValueError when 'targets_mask' is missing from features."""
  predictions = np.random.randint(4, size=(12, 12, 12, 1))
  targets = np.random.randint(4, size=(12, 12, 12, 1))
  features = {}  # deliberately missing the "targets_mask" feature
  with self.assertRaisesRegexp(
      ValueError,
      'masked_neg_log_perplexity requires targets_mask feature'):
    with self.test_session() as session:
      scores, _ = metrics.padded_neg_log_perplexity_with_masking(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32),
          features)
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      _ = session.run(a)
Example #29
Source File: ssd_model.py From benchmarks with Apache License 2.0 | 6 votes |
def loss_function(self, inputs, build_network_result): logits = build_network_result.logits # Unpack model output back to locations and confidence scores of predictions # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4] # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num] pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2) # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4] # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1] # Shape of num_gt: [batch_size] _, gt_loc, gt_label, num_gt = inputs gt_label = tf.cast(gt_label, tf.int32) box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt) class_loss = self._classification_loss(pred_label, gt_label, num_gt) tf.summary.scalar('box_loss', tf.reduce_mean(box_loss)) tf.summary.scalar('class_loss', tf.reduce_mean(class_loss)) return class_loss + box_loss
Example #30
Source File: shuffle_network.py From tensor2tensor with Apache License 2.0 | 5 votes |
def max_pad_length(self, features):
  """Finds max padding length.

  If target length not specified use fixed padding
  length from hparams.max_length.

  Args:
    features: Dictionary with input and target tensors

  Returns:
    tf.Tensor: Length of input and output sequence. Length is power of 2.
  """
  if self.hparams.force_max_length or features.get("targets") is None:
    # Fixed-length mode requires the configured length to already be 2**k.
    # (Fixed assert message: previously said "power of w".)
    assert math.log(self.hparams.max_length, 2).is_integer(), \
        "hparams.max_length should be power of 2"
    return self.hparams.max_length

  length = tf.shape(features["inputs"])[1]
  targets_length = tf.shape(features["targets"])[1]
  length = tf.maximum(length, targets_length)
  # Round the longer of the two sequences up to the next power of 2.
  p = tf.log(tf.cast(length, tf.float32)) / tf.log(2.0)
  p = tf.cast(tf.ceil(p), tf.int32)
  return tf.pow(2, p)