Python tensorflow.global_variables_initializer() Examples

The following are 30 code examples of tensorflow.global_variables_initializer(), drawn from open-source projects. The header above each example names the project and source file it comes from. You may also want to check out all available functions and classes of the tensorflow module.
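In the TensorFlow 1.x graph API, tf.global_variables_initializer() returns an op that, when run, initializes every variable in the GraphKeys.GLOBAL_VARIABLES collection; it must be run in a session before any variable is read. As a minimal sketch of the idiom that recurs throughout the examples below (variable names are illustrative only):

import tensorflow as tf  # TF 1.x graph-mode API

# Variables hold state that must be explicitly initialized in a session.
w = tf.Variable(tf.random_uniform((2, 2)), name='w')
b = tf.Variable(tf.zeros((2,)), name='b')

init = tf.global_variables_initializer()  # op that initializes w and b
with tf.Session() as sess:
    sess.run(init)  # run before reading any variable
    print(sess.run([w, b]))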

Example #1
Source File: resnet_v2_test.py From DOTA_models with Apache License 2.0

def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        output, _ = self._resnet_small(inputs,
                                       None,
                                       global_pool=global_pool,
                                       output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(output, {inputs: images.eval()})
        self.assertEqual(output.shape, (batch, 9, 9, 32))
Example #2
Source File: resnet_v1_test.py From DOTA_models with Apache License 2.0

def testUnknownBatchSize(self):
    batch = 2
    height, width = 65, 65
    global_pool = True
    num_classes = 10
    inputs = create_test_input(None, height, width, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        logits, _ = self._resnet_small(inputs,
                                       num_classes,
                                       global_pool=global_pool,
                                       spatial_squeeze=False,
                                       scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {inputs: images.eval()})
        self.assertEqual(output.shape, (batch, 1, 1, num_classes))
Example #3
Source File: resnet_v1_test.py From DOTA_models with Apache License 2.0

def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        output, _ = self._resnet_small(inputs,
                                       None,
                                       global_pool=global_pool,
                                       output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(output, {inputs: images.eval()})
        self.assertEqual(output.shape, (batch, 9, 9, 32))
Example #4
Source File: inception_resnet_v2_test.py From DOTA_models with Apache License 2.0

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_resnet_v2(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        logits, _ = inception.inception_resnet_v2(eval_inputs,
                                                  num_classes,
                                                  is_training=False,
                                                  reuse=True)
        predictions = tf.argmax(logits, 1)
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
Example #5
Source File: mobilenet_v1_test.py From DOTA_models with Apache License 2.0

def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {inputs: images.eval()})
        self.assertEquals(output.shape, (batch_size, num_classes))
Example #6
Source File: mobilenet_v1_test.py From DOTA_models with Apache License 2.0

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
Example #7
Source File: inception_v4_test.py From DOTA_models with Apache License 2.0

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_v4(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        logits, _ = inception.inception_v4(eval_inputs,
                                           num_classes,
                                           is_training=False,
                                           reuse=True)
        predictions = tf.argmax(logits, 1)
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
Example #8
Source File: inception_v2_test.py From DOTA_models with Apache License 2.0

def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v2(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        feed_dict = {inputs: input_np}
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape),
                             [batch_size, 7, 7, 1024])
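Note that this example calls tf.global_variables_initializer().run() rather than sess.run(...): inside a with-session block the session is installed as the default, so an Operation can be run directly. A minimal sketch of the equivalent forms (the variable v is illustrative only):

import tensorflow as tf

v = tf.Variable(1.0)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)           # explicit form
    init.run()               # equivalent: uses the default session from the with-block
    init.run(session=sess)   # or pass the session explicitly (see Examples #21, #24, #25)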
Example #9
Source File: inception_v2_test.py From DOTA_models with Apache License 2.0

def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v2(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {inputs: images.eval()})
        self.assertEquals(output.shape, (batch_size, num_classes))
Example #10
Source File: inception_v2_test.py From DOTA_models with Apache License 2.0

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v2(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
Example #11
Source File: inception_v3_test.py From DOTA_models with Apache License 2.0

def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v3(inputs, num_classes)
        self.assertListEqual(logits.get_shape().as_list(),
                             [batch_size, num_classes])
        pre_pool = end_points['Mixed_7c']
        feed_dict = {inputs: input_np}
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape),
                             [batch_size, 8, 8, 2048])
Example #12
Source File: inception_v3_test.py From DOTA_models with Apache License 2.0

def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {inputs: images.eval()})
        self.assertEquals(output.shape, (batch_size, num_classes))
Example #13
Source File: inception_v3_test.py From DOTA_models with Apache License 2.0

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v3(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v3(eval_inputs,
                                       num_classes,
                                       is_training=False,
                                       reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
Example #14
Source File: inception_v1_test.py From DOTA_models with Apache License 2.0

def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {inputs: images.eval()})
        self.assertEquals(output.shape, (batch_size, num_classes))
Example #15
Source File: inception_v1_test.py From DOTA_models with Apache License 2.0

def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(predictions)
        self.assertEquals(output.shape, (eval_batch_size,))
Example #16
Source File: translate.py From DOTA_models with Apache License 2.0

def create_model(session, forward_only):
    """Create translation model and initialize or load parameters in session."""
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    model = seq2seq_model.Seq2SeqModel(
        FLAGS.from_vocab_size,
        FLAGS.to_vocab_size,
        _buckets,
        FLAGS.size,
        FLAGS.num_layers,
        FLAGS.max_gradient_norm,
        FLAGS.batch_size,
        FLAGS.learning_rate,
        FLAGS.learning_rate_decay_factor,
        forward_only=forward_only,
        dtype=dtype)
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        session.run(tf.global_variables_initializer())
    return model
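This is the standard restore-or-initialize pattern: the initializer only runs when no usable checkpoint exists, so restored weights are never clobbered by fresh random values. A condensed sketch of the same idiom, where train_dir is a hypothetical checkpoint directory:

import tensorflow as tf

v = tf.Variable(tf.zeros([10]))
saver = tf.train.Saver()
train_dir = '/tmp/my_model'  # hypothetical path

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)   # weights come from disk
    else:
        sess.run(tf.global_variables_initializer())       # fresh parameters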
Example #17
Source File: translate.py From DOTA_models with Apache License 2.0

def self_test():
    """Test the translation model."""
    with tf.Session() as sess:
        print("Self-test for neural translation model.")
        # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
        model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
                                           5.0, 32, 0.3, 0.99, num_samples=8)
        sess.run(tf.global_variables_initializer())

        # Fake data set for both the (3, 3) and (6, 6) bucket.
        data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
                    [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
        for _ in xrange(5):  # Train the fake model for 5 steps.
            bucket_id = random.choice([0, 1])
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                data_set, bucket_id)
            model.step(sess, encoder_inputs, decoder_inputs, target_weights,
                       bucket_id, False)
Example #18
Source File: model_test.py From DOTA_models with Apache License 2.0

def test_char_related_shapes(self):
    ocr_model = self.create_model()
    with self.test_session() as sess:
        endpoints_tf = ocr_model.create_base(
            images=self.fake_images, labels_one_hot=None)
        sess.run(tf.global_variables_initializer())
        endpoints = sess.run(endpoints_tf)
        self.assertEqual(
            (self.batch_size, self.seq_length, self.num_char_classes),
            endpoints.chars_logit.shape)
        self.assertEqual(
            (self.batch_size, self.seq_length, self.num_char_classes),
            endpoints.chars_log_prob.shape)
        self.assertEqual((self.batch_size, self.seq_length),
                         endpoints.predicted_chars.shape)
        self.assertEqual((self.batch_size, self.seq_length),
                         endpoints.predicted_scores.shape)
Example #19
Source File: model_test.py From DOTA_models with Apache License 2.0

def test_create_summaries_is_runnable(self):
    ocr_model = self.create_model()
    data = data_provider.InputEndpoints(
        images=self.fake_images,
        images_orig=self.fake_images,
        labels=self.fake_labels,
        labels_one_hot=slim.one_hot_encoding(self.fake_labels,
                                             self.num_char_classes))
    endpoints = ocr_model.create_base(
        images=self.fake_images, labels_one_hot=None)
    charset = create_fake_charset(self.num_char_classes)
    summaries = ocr_model.create_summaries(
        data, endpoints, charset, is_training=False)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        tf.tables_initializer().run()
        sess.run(summaries)  # just check it is runnable
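This example runs three separate initializers because tf.global_variables_initializer() covers only the GLOBAL_VARIABLES collection; local variables (for example, tf.metrics accumulators) and lookup tables have their own init ops. A minimal sketch of a variable placed in the LOCAL_VARIABLES collection (names are illustrative only):

import tensorflow as tf

v_global = tf.Variable(0.0, name='g')  # in GraphKeys.GLOBAL_VARIABLES by default
v_local = tf.Variable(0.0, name='l',
                      collections=[tf.GraphKeys.LOCAL_VARIABLES])

with tf.Session() as sess:
    # Each initializer covers only its own collection.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    print(sess.run([v_global, v_local]))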
Example #20
Source File: graph_builder_test.py From DOTA_models with Apache License 2.0

def RunTraining(self, hyperparam_config):
    master_spec = self.LoadSpec('master_spec_link.textproto')

    self.assertTrue(isinstance(hyperparam_config, spec_pb2.GridPoint))
    gold_doc = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_GOLD_SENTENCE, gold_doc)
    gold_doc_2 = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_GOLD_SENTENCE_2, gold_doc_2)
    reader_strings = [
        gold_doc.SerializeToString(), gold_doc_2.SerializeToString()
    ]
    tf.logging.info('Generating graph with config: %s', hyperparam_config)
    with tf.Graph().as_default():
        builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)

        target = spec_pb2.TrainTarget()
        target.name = 'testTraining-all'
        train = builder.add_training_from_config(target)
        with self.test_session() as sess:
            logging.info('Initializing')
            sess.run(tf.global_variables_initializer())

            # Run one iteration of training and verify nothing crashes.
            logging.info('Training')
            sess.run(train['run'],
                     feed_dict={train['input_batch']: reader_strings})
Example #21
Source File: vgsl_model_test.py From DOTA_models with Apache License 2.0

def testEndToEndSizes0d(self):
    """Tests that the output sizes match when training/running real 0d data.

    Uses mnist with dual summarizing LSTMs to reduce to a single value.
    """
    filename = _testdata('mnist-tiny')
    with self.test_session() as sess:
        model = vgsl_model.InitNetwork(
            filename,
            model_spec='4,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfxs16]O0s12',
            mode='train')
        tf.global_variables_initializer().run(session=sess)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(sess=sess, coord=coord)
        _, step = model.TrainAStep(sess)
        self.assertEqual(step, 1)
        output, labels = model.RunAStep(sess)
        self.assertEqual(len(output.shape), 2)
        self.assertEqual(len(labels.shape), 1)
        self.assertEqual(output.shape[0], labels.shape[0])
        self.assertEqual(output.shape[1], 12)

# TODO(rays) Support logistic and test with Imagenet (as 0d, multi-object.)
Example #22
Source File: build.py From Traffic_sign_detection_YOLO with MIT License

def setup_meta_ops(self):
    cfg = dict({
        'allow_soft_placement': False,
        'log_device_placement': False
    })

    utility = min(self.FLAGS.gpu, 1.)
    if utility > 0.0:
        self.say('GPU mode with {} usage'.format(utility))
        cfg['gpu_options'] = tf.GPUOptions(
            per_process_gpu_memory_fraction=utility)
        cfg['allow_soft_placement'] = True
    else:
        self.say('Running entirely on CPU')
        cfg['device_count'] = {'GPU': 0}

    if self.FLAGS.train:
        self.build_train_op()

    if self.FLAGS.summary:
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.FLAGS.summary + 'train')

    self.sess = tf.Session(config=tf.ConfigProto(**cfg))
    self.sess.run(tf.global_variables_initializer())

    if not self.ntrain:
        return
    self.saver = tf.train.Saver(tf.global_variables(),
                                max_to_keep=self.FLAGS.keep)
    if self.FLAGS.load != 0:
        self.load_from_ckpt()

    if self.FLAGS.summary:
        self.writer.add_graph(self.sess.graph)
Example #23
Source File: main.py From QA with GNU General Public License v3.0

def train():
    print("Retraining; make sure the machine has at least 8 GB of free RAM "
          "and 2 GB of free GPU memory")

    # Prepare the training data.
    print("Preparing the training data; this takes about five minutes...")
    qTrain, aTrain, lTrain, qIdTrain = qaData.loadData(trainingFile, word2idx,
                                                       unrollSteps, True)
    qDevelop, aDevelop, lDevelop, qIdDevelop = qaData.loadData(
        developFile, word2idx, unrollSteps, True)
    trainQuestionCounts = qIdTrain[-1]
    for i in range(len(qIdDevelop)):
        qIdDevelop[i] += trainQuestionCounts
    tqs, tta, tfa = [], [], []
    for question, trueAnswer, falseAnswer in qaData.trainingBatchIter(
            qTrain + qDevelop, aTrain + aDevelop, lTrain + lDevelop,
            qIdTrain + qIdDevelop, batchSize):
        tqs.append(question), tta.append(trueAnswer), tfa.append(falseAnswer)
    print("Loading finished!")

    # Start training.
    print("Starting training; the whole run takes about 12 hours")
    sess.run(tf.global_variables_initializer())
    lr = learningRate  # local copy to avoid shadowing the global name
    for i in range(lrDownCount):
        optimizer = tf.train.GradientDescentOptimizer(lr)
        optimizer.apply_gradients(zip(grads, tvars))
        trainOp = optimizer.apply_gradients(zip(grads, tvars),
                                            global_step=globalStep)
        for epoch in range(epochs):
            for question, trueAnswer, falseAnswer in zip(tqs, tta, tfa):
                startTime = time.time()
                feed_dict = {
                    lstm.inputQuestions: question,
                    lstm.inputTrueAnswers: trueAnswer,
                    lstm.inputFalseAnswers: falseAnswer,
                    lstm.keep_prob: dropout
                }
                _, step, _, _, loss = sess.run(
                    [trainOp, globalStep, lstm.trueCosSim, lstm.falseCosSim,
                     lstm.loss], feed_dict)
                timeUsed = time.time() - startTime
                print("step:", step, "loss:", loss, "time:", timeUsed)
            saver.save(sess, saveFile)
        lr *= lrDownRate
Example #24
Source File: test_utils_keras.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_get_logits(self):
    import tensorflow as tf
    model = KerasModelWrapper(self.model)
    x = tf.placeholder(tf.float32, shape=(None, 100))
    preds = model.get_probs(x)
    logits = model.get_logits(x)
    x_val = np.random.rand(2, 100)
    tf.global_variables_initializer().run(session=self.sess)
    p_val, logits = self.sess.run([preds, logits], feed_dict={x: x_val})
    p_gt = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
    self.assertTrue(np.allclose(p_val, p_gt, atol=1e-6))
Example #25
Source File: test_utils_keras.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_get_probs(self):
    import tensorflow as tf
    model = KerasModelWrapper(self.model)
    x = tf.placeholder(tf.float32, shape=(None, 100))
    preds = model.get_probs(x)
    x_val = np.random.rand(2, 100)
    tf.global_variables_initializer().run(session=self.sess)
    p_val = self.sess.run(preds, feed_dict={x: x_val})
    self.assertTrue(np.allclose(np.sum(p_val, axis=1), 1, atol=1e-6))
    self.assertTrue(np.all(p_val >= 0))
    self.assertTrue(np.all(p_val <= 1))
Example #26
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def setUp(self):
    super(TestVirtualAdversarialMethod, self).setUp()

    self.sess = tf.Session()
    self.sess.as_default()
    self.model = DummyModel()
    self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)

    # initialize model
    with tf.name_scope('dummy_model'):
        self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
    self.sess.run(tf.global_variables_initializer())
Example #27
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def setUp(self):
    super(TestSaliencyMapMethod, self).setUp()

    self.sess = tf.Session()
    self.sess.as_default()
    self.model = DummyModel()
    self.attack = SaliencyMapMethod(self.model, sess=self.sess)

    # initialize model
    with tf.name_scope('dummy_model'):
        self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
    self.sess.run(tf.global_variables_initializer())

    self.attack = SaliencyMapMethod(self.model, sess=self.sess)
Example #28
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_attack_strength(self):
    """
    This test generates a random source and guide and feeds them in a
    randomly initialized CNN. Checks if an adversarial example can get
    at least 50% closer to the guide compared to the original distance
    of the source and the guide.
    """
    tf.set_random_seed(1234)
    input_shape = self.input_shape
    x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))

    layer = 'fc7'
    attack_params = {'eps': 5. / 256, 'clip_min': 0., 'clip_max': 1.,
                     'nb_iter': 10, 'eps_iter': 0.005,
                     'layer': layer}
    x_adv = self.attack.generate(x_src, x_guide, **attack_params)
    h_adv = self.model.fprop(x_adv)[layer]
    h_src = self.model.fprop(x_src)[layer]
    h_guide = self.model.fprop(x_guide)[layer]

    init = tf.global_variables_initializer()
    self.sess.run(init)

    ha, hs, hg, xa, xs, xg = self.sess.run(
        [h_adv, h_src, h_guide, x_adv, x_src, x_guide])
    d_as = np.sqrt(((hs - ha) * (hs - ha)).sum())
    d_ag = np.sqrt(((hg - ha) * (hg - ha)).sum())
    d_sg = np.sqrt(((hg - hs) * (hg - hs)).sum())
    print("L2 distance between source and adversarial example `%s`: %.4f" %
          (layer, d_as))
    print("L2 distance between guide and adversarial example `%s`: %.4f" %
          (layer, d_ag))
    print("L2 distance between source and guide `%s`: %.4f" %
          (layer, d_sg))
    print("d_ag/d_sg*100 `%s`: %.4f" % (layer, d_ag * 100 / d_sg))
    self.assertTrue(d_ag * 100 / d_sg < 50.)
Example #29
Source File: attack_model_featadv.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def main(argv):
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)
    input_shape = [FLAGS.batch_size, 224, 224, 3]
    x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))

    print("Input shape:")
    print(input_shape)

    model = make_imagenet_cnn(input_shape)
    attack = FastFeatureAdversaries(model)
    attack_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.,
                     'nb_iter': FLAGS.nb_iter, 'eps_iter': 0.01,
                     'layer': FLAGS.layer}
    x_adv = attack.generate(x_src, x_guide, **attack_params)
    h_adv = model.fprop(x_adv)[FLAGS.layer]
    h_src = model.fprop(x_src)[FLAGS.layer]
    h_guide = model.fprop(x_guide)[FLAGS.layer]

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        ha, hs, hg, xa, xs, xg = sess.run(
            [h_adv, h_src, h_guide, x_adv, x_src, x_guide])

        print("L2 distance between source and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hs - ha) * (hs - ha)).sum()))
        print("L2 distance between guide and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hg - ha) * (hg - ha)).sum()))
        print("L2 distance between source and guide `%s`: %.4f" %
              (FLAGS.layer, ((hg - hs) * (hg - hs)).sum()))
        print("Maximum perturbation: %.4f" % np.abs((xa - xs)).max())
        print("Original features: ")
        print(hs[:10, :10])
        print("Adversarial features: ")
        print(ha[:10, :10])
Example #30
Source File: trainer.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def _init_tf(self, X_batch, Y_batch):
    x_pre = self.g0_inputs['x_pre']
    y = self.g0_inputs['y']
    fd = {x_pre: X_batch, y: Y_batch}
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op, feed_dict=fd)
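This last example passes a feed_dict when running the init op, which is necessary when a variable's initial value depends on a placeholder: the initializer cannot be computed without feeding data. A minimal sketch of that situation (x0 and v are hypothetical names):

import tensorflow as tf

x0 = tf.placeholder(tf.float32, shape=(2,))
v = tf.Variable(x0)  # initial value is the fed tensor, not a constant

with tf.Session() as sess:
    # Running the initializer without feeding x0 would raise an error.
    sess.run(tf.global_variables_initializer(), feed_dict={x0: [1.0, 2.0]})
    print(sess.run(v))  # [1. 2.]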