Python tensorflow.Graph() examples.
The following are 30 code examples of tensorflow.Graph().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the
tensorflow module, or try the search function.
Example #1
Source File: bulk_component_test.py From DOTA_models with Apache License 2.0 | 9 votes |
def testFailsOnFixedFeature(self):
    """Checks that an embedded fixed feature is rejected at build time."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse(""" name: "annotate" network_unit { registered_name: "IdentityNetwork" } fixed_feature { name: "fixed" embedding_dim: 32 size: 1 } """, spec)
    with tf.Graph().as_default():
        builder = bulk_component.BulkAnnotatorComponentBuilder(
            self.master, spec)
        # Building the training graph must raise a RuntimeError because of
        # the fixed feature declared above.
        with self.assertRaises(RuntimeError):
            builder.build_greedy_training(self.master_state,
                                          self.network_states)
Example #2
Source File: util.py From ARU-Net with GNU General Public License v2.0 | 8 votes |
def load_graph(frozen_graph_filename):
    """Load a frozen, serialized GraphDef from disk into a new tf.Graph.

    :param frozen_graph_filename: path to the frozen protobuf file.
    :return: a tf.Graph with the deserialized graph definition imported.
    """
    # Read the protobuf file and parse it into an unserialized GraphDef.
    with tf.gfile.GFile(frozen_graph_filename, "rb") as handle:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(handle.read())
    # Import the GraphDef into a fresh default Graph; name="" keeps the
    # imported op names free of an extra prefix.
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="",
            op_dict=None,
            producer_op_list=None,
        )
    return graph
Example #3
Source File: beam_reader_ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def PathScores(self, iterations, beam_size, max_steps, batch_size):
    """Runs the training graph and collects per-iteration path scores.

    Returns a pair (all_path_scores, beam_path_scores), one entry per
    iteration.
    """
    with self.test_session(graph=tf.Graph()) as sess:
        t = self.MakeGraph(beam_size=beam_size, max_steps=max_steps,
                           batch_size=batch_size).training
        sess.run(t['inits'])
        all_path_scores = []
        beam_path_scores = []
        for i in range(iterations):
            logging.info('run %d', i)
            tensors = sess.run([t['alive_steps'], t['concat_scores'],
                                t['all_path_scores'], t['beam_path_scores'],
                                t['indices'], t['path_ids']])
            logging.info('alive for %s, all_path_scores and beam_path_scores, '
                         'indices and path_ids:'
                         '\n%s\n%s\n%s\n%s', tensors[0], tensors[2],
                         tensors[3], tensors[4], tensors[5])
            # Log the element-wise difference between the two score sets.
            logging.info('diff:\n%s', tensors[2] - tensors[3])
            all_path_scores.append(tensors[2])
            beam_path_scores.append(tensors[3])
        return all_path_scores, beam_path_scores
Example #4
Source File: separator.py From spleeter with MIT License | 6 votes |
def __init__(self, params_descriptor, MWF=False, stft_backend="auto", multiprocess=True):
    """Default constructor.

    :param params_descriptor: Descriptor for TF params to be used.
    :param MWF: (Optional) True if MWF should be used, False otherwise.
    :param stft_backend: (Optional) STFT backend name, resolved via get_backend.
    :param multiprocess: (Optional) whether to create a worker Pool.
    """
    self._params = load_configuration(params_descriptor)
    self._sample_rate = self._params['sample_rate']
    self._MWF = MWF
    self._tf_graph = tf.Graph()
    # TF-related members are created lazily; start them all as None.
    self._predictor = None
    self._input_provider = None
    self._builder = None
    self._features = None
    self._session = None
    self._pool = Pool() if multiprocess else None
    self._tasks = []
    self._params["stft_backend"] = get_backend(stft_backend)
Example #5
Source File: test_mnist_tutorial_pytorch.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_mnist_tutorial_pytorch(self):
    """Runs the PyTorch MNIST tutorial on a reduced dataset and checks accuracy."""
    import tensorflow as tf
    from cleverhans_tutorials import mnist_tutorial_pytorch

    with tf.Graph().as_default():
        np.random.seed(42)
        report = mnist_tutorial_pytorch.mnist_tutorial(
            nb_epochs=2,
            train_end=5000,
            test_end=333,
        )
        # Bounds on the values recorded in the AccuracyReport object.
        self.assertGreater(report.clean_train_clean_eval, 0.9)
        self.assertLess(report.clean_train_adv_eval, 0.10)
Example #6
Source File: inception_resnet_v2_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testBuildOnlyUptoFinalEndpoint(self):
    """Builds inception_resnet_v2_base up to each endpoint in turn."""
    batch_size = 5
    height, width = 299, 299
    endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
                 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
                 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a', 'PreAuxLogits',
                 'Mixed_7a', 'Conv2d_7b_1x1']
    for index, endpoint in enumerate(endpoints):
        # Fresh graph per endpoint so op names are not suffixed.
        with tf.Graph().as_default():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            out_tensor, end_points = inception.inception_resnet_v2_base(
                inputs, final_endpoint=endpoint)
            # The op-name check is skipped for 'PreAuxLogits'.
            if endpoint != 'PreAuxLogits':
                self.assertTrue(out_tensor.op.name.startswith(
                    'InceptionResnetV2/' + endpoint))
            self.assertItemsEqual(endpoints[:index + 1], end_points)
Example #7
Source File: mobilenet_v1_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testBuildOnlyUptoFinalEndpoint(self):
    """Builds mobilenet_v1_base up to each endpoint and checks the outputs."""
    batch_size = 5
    height, width = 224, 224
    endpoints = ['Conv2d_0',
                 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                 'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
                 'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
                 'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
                 'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
                 'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
                 'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
                 'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
                 'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
                 'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
                 'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
                 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
    for index, endpoint in enumerate(endpoints):
        # One graph per endpoint keeps op names deterministic.
        with tf.Graph().as_default():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
                inputs, final_endpoint=endpoint)
            self.assertTrue(out_tensor.op.name.startswith(
                'MobilenetV1/' + endpoint))
            self.assertItemsEqual(endpoints[:index + 1], end_points)
Example #8
Source File: inception_v4_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testBuildOnlyUpToFinalEndpoint(self):
    """Builds inception_v4_base up to each endpoint and checks the outputs."""
    batch_size = 5
    height, width = 299, 299
    all_endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
        'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
        'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
        'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
    for index, endpoint in enumerate(all_endpoints):
        # Rebuild from scratch per endpoint inside a fresh graph.
        with tf.Graph().as_default():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            out_tensor, end_points = inception.inception_v4_base(
                inputs, final_endpoint=endpoint)
            self.assertTrue(out_tensor.op.name.startswith(
                'InceptionV4/' + endpoint))
            self.assertItemsEqual(all_endpoints[:index + 1], end_points)
Example #9
Source File: inception_v1_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testBuildOnlyUptoFinalEndpoint(self):
    """Builds inception_v1_base up to each endpoint and checks the outputs."""
    batch_size = 5
    height, width = 224, 224
    endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
                 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
                 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
                 'Mixed_5c']
    for index, endpoint in enumerate(endpoints):
        # Fresh graph for every endpoint under test.
        with tf.Graph().as_default():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            out_tensor, end_points = inception.inception_v1_base(
                inputs, final_endpoint=endpoint)
            self.assertTrue(out_tensor.op.name.startswith(
                'InceptionV1/' + endpoint))
            self.assertItemsEqual(endpoints[:index + 1], end_points)
Example #10
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateLogisticClassifier(self):
    """Creates a single clone of LogisticClassifier and checks placement."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        # No variables exist before the clones are created.
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]

        # Two variables (weights + bias), all pinned to the CPU.
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, 'GPU:0')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
Example #11
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateSingleclone(self):
    """Creates one BatchNormClassifier clone and checks variables/devices."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]

        # BatchNorm adds moving statistics: five variables total, on CPU.
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, 'GPU:0')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # BatchNorm registers two moving-average update ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
Example #12
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateOnecloneWithPS(self):
    """One clone plus one parameter server: variables go to the PS job."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        # Computation runs on the worker GPU ...
        self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
        self.assertEqual(clone.scope, '')
        # ... while all five variables live on the parameter server CPU.
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
Example #13
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testNoSummariesOnGPU(self):
    """Deploys two clones and checks that all summary inputs are on CPU."""
    with tf.Graph().as_default():
        deploy_config = model_deploy.DeploymentConfig(num_clones=2)

        # The clone function builds a fully_connected layer carrying a
        # regularizer loss.
        def ModelFn():
            inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
            reg = tf.contrib.layers.l2_regularizer(0.001)
            tf.contrib.layers.fully_connected(inputs, 30,
                                              weights_regularizer=reg)

        model = model_deploy.deploy(
            deploy_config, ModelFn,
            optimizer=tf.train.GradientDescentOptimizer(1.0))
        # The model summary op should have inputs, and every one of them
        # should be placed on the CPU.
        self.assertTrue(model.summary_op.op.inputs)
        for inp in model.summary_op.op.inputs:
            self.assertEqual('/device:CPU:0', inp.device)
Example #14
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateSingleclone(self):
    """Optimizes a single BatchNormClassifier clone and checks gradients."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        # One gradient per trainable variable.
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        # Gradients are computed on the GPU; variables live on the CPU.
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, 'GPU:0')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #15
Source File: bulk_component_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testBulkFeatureIdExtractorFailsOnLinkedFeature(self):
    """Checks that a linked feature makes the bulk extractor raise ValueError."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse(""" name: "test" network_unit { registered_name: "IdentityNetwork" } fixed_feature { name: "fixed" embedding_dim: -1 size: 1 } linked_feature { name: "linked" embedding_dim: -1 size: 1 source_translator: "identity" source_component: "mock" } """, spec)
    with tf.Graph().as_default():
        # Construction itself must fail: linked features are unsupported.
        with self.assertRaises(ValueError):
            unused_comp = bulk_component.BulkFeatureIdExtractorComponentBuilder(
                self.master, spec)
Example #16
Source File: graph_builder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testSetTracingTrue(self):
    """Checks that 'annotations' does call SetTracing if enabled."""
    test_name = 'set-tracing-true'
    with tf.Graph().as_default():
        builder, _ = self.getBuilderAndTarget(test_name)
        anno = builder.add_annotation(test_name, enable_tracing=True)
        # SetTracing must run after GetSession and before AttachDataReader.
        self.checkOpOrder('annotations', anno['annotations'], [
            'GetSession', 'SetTracing', 'AttachDataReader', 'ReleaseSession'
        ])
        # The same ordering applies to the 'traces' output.
        self.checkOpOrder('traces', anno['traces'], [
            'GetSession', 'SetTracing', 'AttachDataReader', 'ReleaseSession'
        ])
Example #17
Source File: graph_builder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testSetTracingFalse(self):
    """Checks that 'annotations' doesn't call SetTracing if disabled."""
    test_name = 'set-tracing-false'
    with tf.Graph().as_default():
        builder, _ = self.getBuilderAndTarget(test_name)
        # Note: "enable_tracing=False" is the default.
        anno = builder.add_annotation(test_name, enable_tracing=False)

        # ReleaseSession should still be present in the input chain.
        path = _find_input_path_to_type(anno['annotations'], 'ReleaseSession')
        self.assertNotEmpty(path)
        # As should AttachDataReader.
        path = _find_input_path_to_type(path[-1], 'AttachDataReader')
        self.assertNotEmpty(path)
        # But SetTracing must not appear anywhere on the path ...
        set_tracing_path = _find_input_path_to_type(path[-1], 'SetTracing')
        self.assertEmpty(set_tracing_path)
        # ... which instead leads straight to GetSession.
        path = _find_input_path_to_type(path[-1], 'GetSession')
        self.assertNotEmpty(path)
Example #18
Source File: graph_builder_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testAttachDataReader(self):
    """Checks that train['run'] and 'annotations' call AttachDataReader."""
    test_name = 'attach-data-reader'
    with tf.Graph().as_default():
        builder, target = self.getBuilderAndTarget(test_name)
        train = builder.add_training_from_config(target)
        anno = builder.add_annotation(test_name)

        # AttachDataReader sits between GetSession and ReleaseSession for
        # the training op ...
        self.checkOpOrder('train', train['run'],
                          ['GetSession', 'AttachDataReader', 'ReleaseSession'])
        # ... and the same contract applies to the annotation op.
        self.checkOpOrder('annotations', anno['annotations'],
                          ['GetSession', 'AttachDataReader', 'ReleaseSession'])
Example #19
Source File: word2vec_optimized.py From DOTA_models with Apache License 2.0 | 6 votes |
def main(_):
    """Train a word2vec model."""
    # All three paths are mandatory; bail out early if any is missing.
    if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
        print("--train_data --eval_data and --save_path must be specified.")
        sys.exit(1)
    opts = Options()
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device("/cpu:0"):
            model = Word2Vec(opts, session)
            model.read_analogies()  # Read analogy questions
        for _ in xrange(opts.epochs_to_train):
            model.train()  # Process one epoch
            model.eval()   # Eval analogies.
        # Perform a final save.
        model.saver.save(session,
                         os.path.join(opts.save_path, "model.ckpt"),
                         global_step=model.global_step)
        if FLAGS.interactive:
            # E.g.,
            # [0]: model.analogy(b'france', b'paris', b'russia')
            # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
            _start_shell(locals())
Example #20
Source File: export_inference_graph.py From DOTA_models with Apache License 2.0 | 6 votes |
def main(_):
    """Builds an inference graph for the selected model and writes it to disk."""
    if not FLAGS.output_file:
        raise ValueError('You must supply the path to save to with --output_file')
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default() as graph:
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                              FLAGS.dataset_dir)
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            is_training=FLAGS.is_training)
        # Prefer the network's own default image size; fall back to the flag.
        if hasattr(network_fn, 'default_image_size'):
            image_size = network_fn.default_image_size
        else:
            image_size = FLAGS.default_image_size
        placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                     shape=[1, image_size, image_size, 3])
        network_fn(placeholder)
        # Serialize the constructed graph to the requested output file.
        graph_def = graph.as_graph_def()
        with gfile.GFile(FLAGS.output_file, 'wb') as f:
            f.write(graph_def.SerializeToString())
Example #21
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testNoSummariesOnGPUForEvals(self):
    """Eval-mode deploy (no optimizer): summary inputs still land on CPU."""
    with tf.Graph().as_default():
        deploy_config = model_deploy.DeploymentConfig(num_clones=2)

        # The clone function builds a fully_connected layer carrying a
        # regularizer loss.
        def ModelFn():
            inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
            reg = tf.contrib.layers.l2_regularizer(0.001)
            tf.contrib.layers.fully_connected(inputs, 30,
                                              weights_regularizer=reg)

        # No optimizer here, it's an eval.
        model = model_deploy.deploy(deploy_config, ModelFn)
        # The model summary op should have inputs, all placed on the CPU.
        self.assertTrue(model.summary_op.op.inputs)
        for inp in model.summary_op.op.inputs:
            self.assertEqual('/device:CPU:0', inp.device)
Example #22
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testCreateLogisticClassifier(self):
    """Optimizes a single LogisticClassifier clone and checks gradients."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)

        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 2)
        # Logistic regression has no batch norm, hence no update ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        # Gradients are computed on the GPU; variables live on the CPU.
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, 'GPU:0')
            self.assertDeviceEqual(v.device, 'CPU:0')
Example #23
Source File: train.py From DOTA_models with Apache License 2.0 | 5 votes |
def main(unused_argv):
    """Builds the skip-thoughts training graph and starts slim training."""
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern)
    training_config = configuration.training_config()

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="train")
        model.build()

        learning_rate = _setup_learning_rate(training_config,
                                             model.global_step)
        optimizer = tf.train.AdamOptimizer(learning_rate)

        train_tensor = tf.contrib.slim.learning.create_train_op(
            total_loss=model.total_loss,
            optimizer=optimizer,
            global_step=model.global_step,
            clip_gradient_norm=training_config.clip_gradient_norm)

        saver = tf.train.Saver()

    tf.contrib.slim.learning.train(
        train_op=train_tensor,
        logdir=FLAGS.train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
Example #24
Source File: cifar10_eval.py From DOTA_models with Apache License 2.0 | 5 votes |
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        # Evaluate repeatedly unless a single run was requested.
        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example #25
Source File: alexnet_benchmark.py From DOTA_models with Apache License 2.0 | 5 votes |
def run_benchmark():
    """Run the benchmark on AlexNet."""
    with tf.Graph().as_default():
        # Generate some dummy images.
        image_size = 224
        # Note that our padding definition is slightly different the
        # cuda-convnet. In order to force the model to start with the same
        # activations sizes, we add 3 to the image_size and employ VALID
        # padding above.
        images = tf.Variable(tf.random_normal(
            [FLAGS.batch_size, image_size, image_size, 3],
            dtype=tf.float32, stddev=1e-1))

        # Build a Graph that computes the logits predictions from the
        # inference model.
        pool5, parameters = inference(images)

        # Build an initialization operation.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        config = tf.ConfigProto()
        config.gpu_options.allocator_type = 'BFC'
        sess = tf.Session(config=config)
        sess.run(init)

        # Run the forward benchmark.
        time_tensorflow_run(sess, pool5, "Forward")

        # Add a simple objective so we can calculate the backward pass.
        objective = tf.nn.l2_loss(pool5)
        # Compute the gradient with respect to all the parameters.
        grad = tf.gradients(objective, parameters)
        # Run the backward benchmark.
        time_tensorflow_run(sess, grad, "Forward-backward")
Example #26
Source File: graphs_test.py From DOTA_models with Apache License 2.0 | 5 votes |
def testBidirClassifier(self):
    """Builds the bidirectional classifier graph under each AT method."""
    at_methods = [None, 'rp', 'at', 'vat', 'atvat']
    for method in at_methods:
        FLAGS.adv_training_method = method
        with tf.Graph().as_default():
            graphs.VatxtBidirModel().classifier_graph()

            # Ensure variables have been reused:
            # Embedding + 2 LSTM layers + hidden layers + logits layer.
            expected_num_vars = (1 + 2 * 2 * FLAGS.rnn_num_layers +
                                 2 * FLAGS.cl_num_layers + 2)
            self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
Example #27
Source File: graphs_test.py From DOTA_models with Apache License 2.0 | 5 votes |
def testATMethods(self):
    """Builds the unidirectional classifier graph under each AT method."""
    at_methods = [None, 'rp', 'at', 'vat', 'atvat']
    for method in at_methods:
        FLAGS.adv_training_method = method
        with tf.Graph().as_default():
            graphs.VatxtModel().classifier_graph()

            # Ensure variables have been reused:
            # Embedding + LSTM layers + hidden layers + logits layer.
            expected_num_vars = (1 + 2 * FLAGS.rnn_num_layers +
                                 2 * FLAGS.cl_num_layers + 2)
            self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
Example #28
Source File: attack_fgsm.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def main(_):
    """Generates FGSM adversarial images for every batch in the input dir."""
    # Images for inception classifier are normalized to be in [-1, 1]
    # interval, eps is a difference between pixels so it should be in
    # [0, 2] interval. Renormalizing epsilon from [0, 255] to [0, 2].
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001

    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        model = InceptionModel(num_classes)
        fgsm = FastGradientMethod(model)
        x_adv = fgsm.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)

        # Run computation
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images = sess.run(x_adv, feed_dict={x_input: images})
                save_images(adv_images, filenames, FLAGS.output_dir)
Example #29
Source File: mnist_eval.py From deep-learning-note with MIT License | 5 votes |
def evaluate(mnist):
    """Periodically evaluates the latest MNIST checkpoint on the validation set."""
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        validate_feed = {x: mnist.validation.images,
                         y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average shadow variables for evaluation.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The global step is encoded in the checkpoint filename.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g"
                          % (global_step, accuracy_score))
                else:
                    print("No Checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS)
Example #30
Source File: attack_random_noise.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def main(_):
    """Adds clipped random sign noise to every image batch in the input dir."""
    eps = FLAGS.max_epsilon / 255.0
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]

    with tf.Graph().as_default():
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        # Perturb each pixel by +/- eps and clip back into [0, 1].
        noisy_images = x_input + eps * tf.sign(tf.random_normal(batch_shape))
        x_output = tf.clip_by_value(noisy_images, 0.0, 1.0)

        with tf.Session(FLAGS.master) as sess:
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                out_images = sess.run(x_output, feed_dict={x_input: images})
                save_images(out_images, filenames, FLAGS.output_dir)