Python tensorflow.Session() Examples

The following are 30 code examples showing how to use tensorflow.Session(). They are extracted from open source projects; the project, author, source file, and license are noted above each example.

You may also want to check out all available functions and classes of the tensorflow module.
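
Before looking at the project examples, here is a minimal, self-contained sketch of the basic tf.Session() workflow in the TensorFlow 1.x API: build a graph of operations first, then evaluate tensors by running them inside a session. This sketch is illustrative only and is not taken from any of the projects below.

import tensorflow as tf

# Graph construction: no computation happens yet.
a = tf.constant(2.0)
b = tf.constant(3.0)
total = a + b

# Explicitly managed session.
sess = tf.Session()
print(sess.run(total))  # 5.0
sess.close()

# Or use the session as a context manager so it is closed automatically.
with tf.Session() as sess:
    print(sess.run(total))  # 5.0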

Example 1
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: build.py    License: MIT License
def savepb(self):
		"""
		Create a standalone const graph def that 
		C++	can load and run.
		"""
		darknet_pb = self.to_darknet()
		flags_pb = self.FLAGS
		flags_pb.verbalise = False
		
		flags_pb.train = False
		# rebuild another tfnet. all const.
		tfnet_pb = TFNet(flags_pb, darknet_pb)		
		tfnet_pb.sess = tf.Session(graph = tfnet_pb.graph)
		# tfnet_pb.predict() # uncomment for unit testing
		name = 'built_graph/{}.pb'.format(self.meta['name'])
		os.makedirs(os.path.dirname(name), exist_ok=True)
		#Save dump of everything in meta
		with open('built_graph/{}.meta'.format(self.meta['name']), 'w') as fp:
			json.dump(self.meta, fp)
		self.say('Saving const graph def to {}'.format(name))
		graph_def = tfnet_pb.sess.graph_def
		tf.train.write_graph(graph_def,'./', name, False) 
Example 2
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    License: MIT License
def create_session(config_dict=dict(), force_as_default=False):
    config = tf.ConfigProto()
    for key, value in config_dict.items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session

#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
#   tf.variables_initializer(tf.report_uninitialized_variables()).run()
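
The comment above describes a helper that initializes only the tf.Variables that are not yet initialized; its body is cut off in this excerpt. A minimal sketch of such a helper, assuming the TensorFlow 1.x API and an already-created session (this is not the project's actual implementation), could look like:

def init_uninitialized_vars(sess):
    # Check which global variables already hold a value.
    variables = tf.global_variables()
    init_flags = sess.run([tf.is_variable_initialized(v) for v in variables])
    uninitialized = [v for v, flag in zip(variables, init_flags) if not flag]
    # Initialize only the missing ones, leaving existing values untouched.
    if uninitialized:
        sess.run(tf.variables_initializer(uninitialized))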
Example 3
Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    License: BSD 3-Clause "New" or "Revised" License
def test_feature_pairing(self):
        fgsm = FastGradientMethod(self.model)
        attack = lambda x: fgsm.generate(x)
        loss = FeaturePairing(self.model, weight=0.1, attack=attack)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([4.296023369, 2.963884830]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([4.296023369, 2.963884830]) / 2., atol=1e-6)

        loss = FeaturePairing(self.model, weight=10., attack=attack)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([4.333082676, 3.00094414]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([4.333082676, 3.00094414]) / 2., atol=1e-6) 
Example 4
Project: neural-fingerprinting   Author: StephanZheng   File: enjoy-adv.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, env, dueling, noisy, fname):
        self.g = tf.Graph()
        self.noisy = noisy
        self.dueling = dueling
        self.env = env
        with self.g.as_default():
            self.act = deepq.build_act_enjoy(
                make_obs_ph=lambda name: U.Uint8Input(
                    env.observation_space.shape, name=name),
                q_func=dueling_model if dueling else model,
                num_actions=env.action_space.n,
                noisy=noisy
            )
            self.saver = tf.train.Saver()
        self.sess = tf.Session(graph=self.g)

        if fname is not None:
            print('Loading Model...')
            self.saver.restore(self.sess, fname) 
Example 5
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    License: BSD 3-Clause "New" or "Revised" License
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack 
Example 6
Project: neural-fingerprinting   Author: StephanZheng   File: test_runner.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestRunnerMultiGPU, self).setUp()
        self.sess = tf.Session()

        inputs = []
        outputs = []
        self.niter = 10
        niter = self.niter
        # A Simple graph with `niter` sub-graphs.
        with tf.variable_scope(None, 'runner'):
            for i in range(niter):
                v = tf.get_variable('v%d' % i, shape=(100, 10))
                w = tf.get_variable('w%d' % i, shape=(100, 1))

                inputs += [{'v': v, 'w': w}]
                outputs += [{'v': v, 'w': w}]

        self.runner = RunnerMultiGPU(inputs, outputs, sess=self.sess) 
Example 7
Project: neural-fingerprinting   Author: StephanZheng   File: test_dropout.py    License: BSD 3-Clause "New" or "Revised" License
def test_drop():
    # Make sure dropout is activated successfully

    # We would like to configure the test to deterministically drop,
    # so that the test does not need to use multiple runs.
    # However, tf.nn.dropout divides by include_prob, so zero or
    # infinitesimal include_prob causes NaNs.
    # 1e-8 does not cause NaNs and shouldn't be a significant source
    # of test flakiness relative to dependency downloads failing, etc.
    model = MLP(input_shape=[1, 1], layers=[Dropout(name='output',
                                                    include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    y = model.get_layer(x, 'output', dropout=True)
    sess = tf.Session()
    y_value = sess.run(y)
    # Subject to very rare random failure because include_prob is not exact 0
    assert y_value == 0., y_value 
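
The comments in test_drop point out that tf.nn.dropout rescales the kept activations by 1 / keep_prob, which is why a keep probability of exactly zero would produce NaNs. A tiny stand-alone TF 1.x sketch (not part of the project) that shows this scaling:

x = tf.ones([1, 4])
y = tf.nn.dropout(x, keep_prob=0.5)
with tf.Session() as sess:
    # Kept entries are scaled to 1 / 0.5 = 2.0; dropped entries become 0.
    print(sess.run(y))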
Example 8
Project: neural-fingerprinting   Author: StephanZheng   File: test_dropout.py    License: BSD 3-Clause "New" or "Revised" License
def test_override():
    # Make sure dropout_dict changes dropout probabilities successfully

    # We would like to configure the test to deterministically drop,
    # so that the test does not need to use multiple runs.
    # However, tf.nn.dropout divides by include_prob, so zero or
    # infinitesimal include_prob causes NaNs.
    # For this test, random failure to drop will not cause the test to fail.
    # The stochastic version should not even run if everything is working
    # right.
    model = MLP(input_shape=[1, 1], layers=[Dropout(name='output',
                                                    include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    dropout_dict = {'output': 1.}
    y = model.get_layer(x, 'output', dropout=True, dropout_dict=dropout_dict)
    sess = tf.Session()
    y_value = sess.run(y)
    assert y_value == 1., y_value 
Example 9
Project: spleeter   Author: deezer   File: test_separator.py    License: MIT License
def test_separate(test_file, configuration, backend):
    """ Test separation from raw data. """
    with tf.Session() as sess:
        instruments = MODEL_TO_INST[configuration]
        adapter = get_default_audio_adapter()
        waveform, _ = adapter.load(test_file)
        separator = Separator(configuration, stft_backend=backend)
        prediction = separator.separate(waveform, test_file)
        assert len(prediction) == len(instruments)
        for instrument in instruments:
            assert instrument in prediction
        for instrument in instruments:
            track = prediction[instrument]
            assert waveform.shape[:-1] == track.shape[:-1]
            assert not np.allclose(waveform, track)
            for compared in instruments:
                if instrument != compared:
                    assert not np.allclose(track, prediction[compared]) 
Example 10
Project: deep-learning-note   Author: wdxtub   File: 16_basic_kernels.py    License: MIT License
def main():
    rgb = False
    if rgb:
        kernels_list = [kernels.BLUR_FILTER_RGB,
                        kernels.SHARPEN_FILTER_RGB,
                        kernels.EDGE_FILTER_RGB,
                        kernels.TOP_SOBEL_RGB,
                        kernels.EMBOSS_FILTER_RGB]
    else:
        kernels_list = [kernels.BLUR_FILTER,
                        kernels.SHARPEN_FILTER,
                        kernels.EDGE_FILTER,
                        kernels.TOP_SOBEL,
                        kernels.EMBOSS_FILTER]

    kernels_list = kernels_list[1:]
    image = read_one_image('data/images/naruto.jpeg')
    if not rgb:
        image = tf.image.rgb_to_grayscale(image)
    image = tf.expand_dims(image, 0) # make it into a batch of 1 element
    images = convolve(image, kernels_list, rgb)
    with tf.Session() as sess:
        images = sess.run(images) # convert images from tensors to float values
    show_images(images, rgb) 
Example 11
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 12
Project: convseg   Author: chqiwang   File: server.py    License: MIT License
def make_app(model_dir):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    config.allow_soft_placement = True
    config.log_device_placement = True
    sess = tf.Session(config=config)
    tagger = Tagger(sess=sess, model_dir=model_dir, scope=TASK.scope, batch_size=200)
    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/%s" % TASK.scope, TaskHandler, {'tagger': tagger})
    ]) 
Example 13
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: loader.py    License: MIT License
def load(self, ckpt, ignore):
        meta = ckpt + '.meta'
        with tf.Graph().as_default() as graph:
            with tf.Session().as_default() as sess:
                saver = tf.train.import_meta_graph(meta)
                saver.restore(sess, ckpt)
                for var in tf.global_variables():
                    name = var.name.split(':')[0]
                    packet = [name, var.get_shape().as_list()]
                    self.src_key += [packet]
                    self.vals += [var.eval(sess)] 
Example 14
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: build.py    License: MIT License
def setup_meta_ops(self):
		cfg = dict({
			'allow_soft_placement': False,
			'log_device_placement': False
		})

		utility = min(self.FLAGS.gpu, 1.)
		if utility > 0.0:
			self.say('GPU mode with {} usage'.format(utility))
			cfg['gpu_options'] = tf.GPUOptions(
				per_process_gpu_memory_fraction = utility)
			cfg['allow_soft_placement'] = True
		else: 
			self.say('Running entirely on CPU')
			cfg['device_count'] = {'GPU': 0}

		if self.FLAGS.train: self.build_train_op()
		
		if self.FLAGS.summary:
			self.summary_op = tf.summary.merge_all()
			self.writer = tf.summary.FileWriter(self.FLAGS.summary + 'train')
		
		self.sess = tf.Session(config = tf.ConfigProto(**cfg))
		self.sess.run(tf.global_variables_initializer())

		if not self.ntrain: return
		self.saver = tf.train.Saver(tf.global_variables(), 
			max_to_keep = self.FLAGS.keep)
		if self.FLAGS.load != 0: self.load_from_ckpt()
		
		if self.FLAGS.summary:
			self.writer.add_graph(self.sess.graph) 
Example 15
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    License: MIT License
def init_tf(config_dict=dict()):
    if tf.get_default_session() is None:
        tf.set_random_seed(np.random.randint(1 << 31))
        create_session(config_dict, force_as_default=True)

#----------------------------------------------------------------------------
# Create tf.Session based on config dict of the form
# {'gpu_options.allow_growth': True} 
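
A hypothetical call exercising init_tf (and, through it, the create_session helper from Example 2) with a config dict of the form described above; the dotted keys are assumed to resolve onto tf.ConfigProto fields exactly as create_session does:

init_tf({'gpu_options.allow_growth': True,
         'gpu_options.per_process_gpu_memory_fraction': 0.5})
sess = tf.get_default_session()  # available because force_as_default=True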
Example 16
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    License: GNU General Public License v3.0
def __init__(self):
        from models import inception_resnet_v1  # facenet model
        self.network = inception_resnet_v1

        self.image_batch = tf.placeholder(tf.uint8, shape=[None, 160, 160, 3], name='images')

        image = (tf.cast(self.image_batch, tf.float32) - 127.5) / 128.0
        prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
        self.embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        self.sess = tf.Session()
        saver = tf.train.Saver()
        saver.restore(self.sess, 'models/20180402-114759/model-20180402-114759.ckpt-275') 
Example 17
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    License: GNU General Public License v3.0
def __init__(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
            sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                self.pnet, self.rnet, self.onet = FaceDet.create_mtcnn(sess, None) 
Example 18
Project: jiji-with-tensorflow-example   Author: unageanu   File: model.py    License: MIT License
def __enter__(self):
        self.session = tf.Session()
        return self 
Example 19
Project: neural-fingerprinting   Author: StephanZheng   File: gen_noisy.py    License: BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess,model):
    dataset = 'cifar'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 20
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    License: BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess,model):
    dataset = 'cifar'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 21
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    License: BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess,model):
    dataset = 'mnist'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      dict_nat = {model.x_input: x_batch,
                  model.y_input: y_batch}

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 22
Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    License: BSD 3-Clause "New" or "Revised" License
def test_xe(self):
        loss = CrossEntropy(self.model, smoothing=0.)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([2.210599660, 1.53666997]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([2.210599660, 1.53666997]) / 2., atol=1e-6) 
Example 23
Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    License: BSD 3-Clause "New" or "Revised" License
def test_xe_smoothing(self):
        loss = CrossEntropy(self.model, smoothing=0.1)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([2.10587597, 1.47194624]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([2.10587597, 1.47194624]) / 2., atol=1e-6) 
Example 24
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        from keras.models import Sequential
        from keras.layers import Dense, Activation
        import tensorflow as tf

        def dummy_model():
            input_shape = (100,)
            return Sequential([Dense(20, name='l1',
                                     input_shape=input_shape),
                               Dense(10, name='l2'),
                               Activation('softmax', name='softmax')])

        self.sess = tf.Session()
        self.sess.as_default()
        self.model = dummy_model() 
Example 25
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_model(self):
        sess = tf.Session()

        # Exception is thrown when model does not have __call__ attribute
        with self.assertRaises(Exception) as context:
            model = tf.placeholder(tf.float32, shape=(None, 10))
            Attack(model, back='tf', sess=sess)
        self.assertTrue(context.exception) 
Example 26
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_parse(self):
        sess = tf.Session()

        test_attack = Attack(Model('model', 10, {}), back='tf', sess=sess)
        self.assertTrue(test_attack.parse_params({})) 
Example 27
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestVirtualAdversarialMethod, self).setUp()

        self.sess = tf.Session()
        self.sess.as_default()
        self.model = DummyModel()
        self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)

        # initialize model
        with tf.name_scope('dummy_model'):
            self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
        self.sess.run(tf.global_variables_initializer()) 
Example 28
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestSPSA, self).setUp()

        self.sess = tf.Session()
        self.model = SimpleModel()
        self.attack = SPSA(self.model, sess=self.sess) 
Example 29
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestBasicIterativeMethod, self).setUp()

        self.sess = tf.Session()
        self.model = SimpleModel()
        self.attack = BasicIterativeMethod(self.model, sess=self.sess) 
Example 30
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestMomentumIterativeMethod, self).setUp()

        self.sess = tf.Session()
        self.model = SimpleModel()
        self.attack = MomentumIterativeMethod(self.model, sess=self.sess)