Python tensorflow.constant() Examples

The following are 30 code examples of tensorflow.constant(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
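
Before the project examples, here is a minimal sketch of tf.constant() itself. It uses TF 1.x-style graph code to match the examples below; the variable names and values are purely illustrative:

import numpy as np
import tensorflow as tf

# A few illustrative ways to build constant tensors.
scalar = tf.constant(3.0)                            # 0-D float32 tensor
vector = tf.constant([1, 2, 3], dtype=tf.int64)      # explicit dtype
matrix = tf.constant(np.arange(6).reshape(2, 3),
                     dtype=tf.float32)               # from a NumPy array
filled = tf.constant(0.5, shape=[2, 2])              # scalar value filled to a 2x2 shape

with tf.Session() as sess:
    print(sess.run([scalar, vector, matrix, filled]))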

Example #1
Source File: optimization_test.py From BERT-Classification-Tutorial with Apache License 2.0 | 6 votes |
def test_adam(self):
  with self.test_session() as sess:
    w = tf.get_variable(
        "w",
        shape=[3],
        initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
    x = tf.constant([0.4, 0.2, -0.5])
    loss = tf.reduce_mean(tf.square(x - w))
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    global_step = tf.train.get_or_create_global_step()
    optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    for _ in range(100):
      sess.run(train_op)
    w_np = sess.run(w)
    self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
Example #2
Source File: preview.py From cwavegan with MIT License | 6 votes |
def noise_input_fn(params):
  """Input function for generating samples for PREDICT mode.

  Generates a single Tensor of fixed random noise. Use tf.data.Dataset to
  signal to the estimator when to terminate the generator returned by
  predict().

  Args:
    params: param `dict` passed by TPUEstimator.

  Returns:
    1-element `dict` containing the randomly generated noise.
  """
  # random noise
  np.random.seed(0)
  noise_dataset = tf.data.Dataset.from_tensors(tf.constant(
      np.random.randn(params['batch_size'], FLAGS.noise_dim),
      dtype=tf.float32))
  noise = noise_dataset.make_one_shot_iterator().get_next()
  return {'random_noise': noise}, None
Example #3
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0 | 6 votes |
def structure(self, input_tensor):
    """
    Args:
        input_tensor: NHWC
    """
    rnd = tf.random_uniform((), 135, 160, dtype=tf.int32)
    rescaled = tf.image.resize_images(
        input_tensor, [rnd, rnd],
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    h_rem = 160 - rnd
    w_rem = 160 - rnd
    pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)
    pad_right = w_rem - pad_left
    pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)
    pad_bottom = h_rem - pad_top
    padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom],
                               [pad_left, pad_right], [0, 0]])
    padded.set_shape((input_tensor.shape[0], 160, 160, 3))
    output = tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(0.9),
                     lambda: padded, lambda: input_tensor)
    return output
Example #4
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0 | 6 votes |
def build_pgd_attack(self, eps):
    victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

    def one_step_attack(image, grad):
        """
        core components of this attack are:
        (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
        (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
        (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
        """
        orig_image = image
        image = self.structure(image)
        image = (image - 127.5) / 128.0
        image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        embeddings = tf.reshape(embeddings[0], [512, 1])
        objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized

        noise, = tf.gradients(objective, orig_image)

        noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
        noise = 0.9 * grad + noise

        adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
        return adv, noise

    input = tf.to_float(self.image_batch)
    lower_bound = tf.clip_by_value(input - eps, 0, 255.)
    upper_bound = tf.clip_by_value(input + eps, 0, 255.)

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        adv, _ = tf.while_loop(
            lambda _, __: True, one_step_attack,
            (input, tf.zeros_like(input)),
            back_prop=False,
            maximum_iterations=100,
            parallel_iterations=1)
    self.adv_image = adv
    return adv
Example #5
Source File: test_utils_tf.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_clip_eta_goldilocks(self):
    # Test that the clipping handles perturbations that are
    # too small, just right, and too big correctly
    eta = tf.constant([[2.], [3.], [4.]])
    assert eta.dtype == tf.float32, eta.dtype
    eps = 3.
    for ord_arg in [np.inf, 1, 2]:
      for sign in [-1., 1.]:
        clipped = clip_eta(eta * sign, ord_arg, eps)
        clipped_value = self.sess.run(clipped)
        gold = sign * np.array([[2.], [3.], [3.]])
        self.assertClose(clipped_value, gold)
        grad, = tf.gradients(clipped, eta)
        grad_value = self.sess.run(grad)
        # Note: the second 1. is debatable (the left-sided derivative
        # and the right-sided derivative do not match, so formally
        # the derivative is not defined). This test makes sure that
        # we at least handle this oddity consistently across all the
        # argument values we test
        gold = sign * np.array([[1.], [1.], [0.]])
        assert np.allclose(grad_value, gold)
Example #6
Source File: test_dropout.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_drop():
  # Make sure dropout is activated successfully
  # We would like to configure the test to deterministically drop,
  # so that the test does not need to use multiple runs.
  # However, tf.nn.dropout divides by include_prob, so zero or
  # infinitesimal include_prob causes NaNs.
  # 1e-8 does not cause NaNs and shouldn't be a significant source
  # of test flakiness relative to dependency downloads failing, etc.
  model = MLP(input_shape=[1, 1],
              layers=[Dropout(name='output', include_prob=1e-8)])
  x = tf.constant([[1]], dtype=tf.float32)
  y = model.get_layer(x, 'output', dropout=True)
  sess = tf.Session()
  y_value = sess.run(y)
  # Subject to very rare random failure because include_prob is not exact 0
  assert y_value == 0., y_value
Example #7
Source File: tensor.py From spleeter with MIT License | 6 votes |
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result
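
The function returns a graph-mode boolean, so it is typically consumed by an assertion or a dataset filter. A hedged usage sketch follows; the placeholder and names are invented for illustration, not taken from spleeter:

# Hypothetical usage: fail loudly when a stereo waveform has the wrong channel count.
waveform = tf.placeholder(tf.float32, shape=[None, None], name='waveform')
shape_ok = check_tensor_shape(waveform, (None, 2))   # only the channel axis is constrained
with tf.control_dependencies([tf.Assert(shape_ok, ['unexpected waveform shape'])]):
    checked_waveform = tf.identity(waveform)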
Example #8
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, 'GPU:0')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
Example #9
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, 'GPU:0')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
Example #10
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)
Example #11
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, 'GPU:0')
        self.assertDeviceEqual(v.device, 'CPU:0')
Example #12
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = tf.contrib.layers.l2_regularizer(0.001)
        tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)

      # No optimizer here, it's an eval.
      model = model_deploy.deploy(deploy_config, ModelFn)
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device)
Example #13
Source File: train.py From DOTA_models with Apache License 2.0 | 6 votes |
def _setup_learning_rate(config, global_step):
  """Sets up the learning rate with optional exponential decay.

  Args:
    config: Object containing learning rate configuration parameters.
    global_step: Tensor; the global step.

  Returns:
    learning_rate: Tensor; the learning rate with exponential decay.
  """
  if config.learning_rate_decay_factor > 0:
    learning_rate = tf.train.exponential_decay(
        learning_rate=float(config.learning_rate),
        global_step=global_step,
        decay_steps=config.learning_rate_decay_steps,
        decay_rate=config.learning_rate_decay_factor,
        staircase=False)
  else:
    learning_rate = tf.constant(config.learning_rate)
  return learning_rate
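
The function only needs a few attributes on its config argument, so a hedged usage sketch with an invented config stub (attribute values are illustrative, not from the original project) looks like this:

# Hypothetical config stub; any object exposing these attributes works.
class _Config(object):
  learning_rate = 0.1
  learning_rate_decay_factor = 0.5
  learning_rate_decay_steps = 10000

global_step = tf.train.get_or_create_global_step()
lr = _setup_learning_rate(_Config(), global_step)  # decay branch, since decay_factor > 0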
Example #14
Source File: layers.py From DOTA_models with Apache License 2.0 | 6 votes |
def __init__(self,
             vocab_size,
             embedding_dim,
             normalize=False,
             vocab_freqs=None,
             keep_prob=1.,
             **kwargs):
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.normalized = normalize
    self.keep_prob = keep_prob

    if normalize:
      assert vocab_freqs is not None
      self.vocab_freqs = tf.constant(
          vocab_freqs, dtype=tf.float32, shape=(vocab_size, 1))

    super(Embedding, self).__init__(**kwargs)
Example #15
Source File: model_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def initialize_fakes(self):
    self.images_shape = (self.batch_size, self.image_height,
                         self.image_width, 3)
    self.fake_images = tf.constant(
        self.rng.randint(low=0, high=255,
                         size=self.images_shape).astype('float32'),
        name='input_node')
    self.fake_conv_tower_np = self.rng.randn(
        *self.conv_tower_shape).astype('float32')
    self.fake_conv_tower = tf.constant(self.fake_conv_tower_np)
    self.fake_logits = tf.constant(
        self.rng.randn(*self.chars_logit_shape).astype('float32'))
    self.fake_labels = tf.constant(
        self.rng.randint(
            low=0,
            high=self.num_char_classes,
            size=(self.batch_size, self.seq_length)).astype('int64'))
Example #16
Source File: vision_baseline_lstm.py From DOTA_models with Apache License 2.0 | 6 votes |
def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out,
               num_steps, state_input_op):
  # returns state_name, state_init_op, updated_state_op, out_op
  with tf.name_scope('reshape_'+name):
    sh = x.get_shape().as_list()
    x = tf.reshape(x, shape=[batch_size, -1, sh[-1]])

  with tf.variable_scope(name) as varscope:
    cell = tf.contrib.rnn.LSTMCell(
        num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False,
        num_proj=lstm_out, use_peepholes=True,
        initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0),
        cell_clip=None, proj_clip=None)

    sh = [batch_size, 1, lstm_dim+lstm_out]
    state_init_op = tf.constant(0., dtype=tf.float32, shape=sh)

    fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope)
    out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1),
                                       lambda: fn(num_steps))

  return name, state_init_op, updated_state_op, out_op
Example #17
Source File: resnet_model.py From DOTA_models with Apache License 2.0 | 6 votes |
def _build_train_op(self):
    """Build training specific ops for the graph."""
    self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
    tf.summary.scalar('learning_rate', self.lrn_rate)

    trainable_variables = tf.trainable_variables()
    grads = tf.gradients(self.cost, trainable_variables)

    if self.hps.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
    elif self.hps.optimizer == 'mom':
      optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)

    apply_op = optimizer.apply_gradients(
        zip(grads, trainable_variables),
        global_step=self.global_step, name='train_step')

    train_ops = [apply_op] + self._extra_train_ops
    self.train_op = tf.group(*train_ops)

# TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py
Example #18
Source File: graph_builder.py From DOTA_models with Apache License 2.0 | 6 votes |
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch,
                        lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
  return tf.train.exponential_decay(
      base_rate,
      step_var,
      hyperparams.decay_steps,
      hyperparams.decay_base,
      staircase=hyperparams.decay_staircase)
Example #19
Source File: digraph_ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testArcSourcePotentialsFromTokens(self):
    with self.test_session():
      tokens = tf.constant([[[4, 5, 6],
                             [5, 6, 7],
                             [6, 7, 8]],
                            [[6, 7, 8],
                             [5, 6, 7],
                             [4, 5, 6]]], tf.float32)
      weights = tf.constant([2, 3, 5], tf.float32)

      arcs = digraph_ops.ArcSourcePotentialsFromTokens(tokens, weights)

      self.assertAllEqual(arcs.eval(),
                          [[[53, 53, 53],
                            [63, 63, 63],
                            [73, 73, 73]],
                           [[73, 73, 73],
                            [63, 63, 63],
                            [53, 53, 53]]])
Example #20
Source File: digraph_ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testRootPotentialsFromTokens(self):
    with self.test_session():
      root = tf.constant([1, 2], tf.float32)
      tokens = tf.constant([[[4, 5, 6],
                             [5, 6, 7],
                             [6, 7, 8]],
                            [[6, 7, 8],
                             [5, 6, 7],
                             [4, 5, 6]]], tf.float32)
      weights = tf.constant([[2, 3, 5],
                             [7, 11, 13]], tf.float32)

      roots = digraph_ops.RootPotentialsFromTokens(root, tokens, weights)

      self.assertAllEqual(roots.eval(),
                          [[375, 447, 519],
                           [519, 447, 375]])
Example #21
Source File: digraph_ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCombineArcAndRootPotentials(self):
    with self.test_session():
      arcs = tf.constant([[[1, 2, 3],
                           [2, 3, 4],
                           [3, 4, 5]],
                          [[3, 4, 5],
                           [2, 3, 4],
                           [1, 2, 3]]], tf.float32)
      roots = tf.constant([[6, 7, 8],
                           [8, 7, 6]], tf.float32)

      potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots)

      self.assertAllEqual(potentials.eval(),
                          [[[6, 2, 3],
                            [2, 7, 4],
                            [3, 4, 8]],
                           [[8, 4, 5],
                            [2, 7, 4],
                            [1, 2, 6]]])
Example #22
Source File: digraph_ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testLabelPotentialsFromTokens(self):
    with self.test_session():
      tokens = tf.constant([[[1, 2],
                             [3, 4],
                             [5, 6]],
                            [[6, 5],
                             [4, 3],
                             [2, 1]]], tf.float32)
      weights = tf.constant([[ 2,  3],
                             [ 5,  7],
                             [11, 13]], tf.float32)

      labels = digraph_ops.LabelPotentialsFromTokens(tokens, weights)

      self.assertAllEqual(labels.eval(),
                          [[[  8,  19,  37],
                            [ 18,  43,  85],
                            [ 28,  67, 133]],
                           [[ 27,  65, 131],
                            [ 17,  41,  83],
                            [  7,  17,  35]]])
Example #23
Source File: component.py From DOTA_models with Apache License 2.0 | 6 votes |
def build_structured_training(self, state, network_states):
    """Builds a beam search based training loop for this component.

    The default implementation builds a dummy graph and raises a
    TensorFlow runtime exception to indicate that structured training
    is not implemented.

    Args:
      state: MasterState from the 'AdvanceMaster' op that advances the
        underlying master to this component.
      network_states: dictionary of component NetworkState objects.

    Returns:
      (handle, cost, correct, total) -- These are TF ops corresponding to
      the final handle after unrolling, the total cost, and the total
      number of actions. Since the number of correctly predicted actions
      is not applicable in the structured training setting, a dummy value
      should be returned.
    """
    del network_states  # Unused.
    with tf.control_dependencies([tf.Assert(False, ['Not implemented.'])]):
      handle = tf.identity(state.handle)
    cost = tf.constant(0.)
    correct, total = tf.constant(0), tf.constant(0)
    return handle, cost, correct, total
Example #24
Source File: modeling_test.py From BERT-Classification-Tutorial with Apache License 2.0 | 5 votes |
def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
      rng = random.Random()

    total_dims = 1
    for dim in shape:
      total_dims *= dim

    values = []
    for _ in range(total_dims):
      values.append(rng.randint(0, vocab_size - 1))

    return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
Example #25
Source File: siamese_network_semantic.py From deep-siamese-text-similarity with MIT License | 5 votes |
def __init__(
    self, sequence_length, vocab_size, embedding_size, hidden_units,
    l2_reg_lambda, batch_size, trainableEmbeddings):

    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
    print(self.embedded_words1)

    # Create a convolution + maxpool layer for each filter size
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob,
                                    "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob,
                                    "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(
            tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(
            self.distance,
            tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                   tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance),
                                    tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Example #26
Source File: siamese_network.py From deep-siamese-text-similarity with MIT License | 5 votes |
def __init__(
    self, sequence_length, vocab_size, embedding_size, hidden_units,
    l2_reg_lambda, batch_size):

    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
            trainable=True, name="W")
        self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        #self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
        self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        #self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)

    # Create a convolution + maxpool layer for each filter size
    with tf.name_scope("output"):
        self.out1 = self.BiRNN(self.embedded_chars1, self.dropout_keep_prob,
                               "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.BiRNN(self.embedded_chars2, self.dropout_keep_prob,
                               "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(
            tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(
            self.distance,
            tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                   tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance),
                                    tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Example #27
Source File: networks.py From disentangling_conditional_gans with MIT License | 5 votes |
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
    if fan_in is None:
        fan_in = np.prod(shape[:-1])
    std = gain / np.sqrt(fan_in)  # He init
    if use_wscale:
        wscale = tf.constant(np.float32(std), name='wscale')
        return tf.get_variable('weight', shape=shape,
                               initializer=tf.initializers.random_normal()) * wscale
    else:
        return tf.get_variable('weight', shape=shape,
                               initializer=tf.initializers.random_normal(0, std))

#----------------------------------------------------------------------------
# Fully-connected layer.
Example #28
Source File: networks.py From disentangling_conditional_gans with MIT License | 5 votes |
def leaky_relu(x, alpha=0.2):
    with tf.name_scope('LeakyRelu'):
        alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
        return tf.maximum(x * alpha, x)

#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer.
Example #29
Source File: tf_model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _bias_variable1(shape, name=None):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.5, shape=shape)
    return tf.Variable(initial, name=name)
Example #30
Source File: tf_model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _bias_variable(shape, name=None):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.5, shape=shape)
    return tf.Variable(initial, name=name)