Python tensorflow.Session() Examples

The following are code examples for showing how to use tensorflow.Session(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__diffable_param_manager__tensorflow.py    MIT License 6 votes vote down vote up
def unflatten_to_common_param_dict__tf(
        param_vec=None,
        n_states=1,
        n_labels=1,
        n_vocabs=1,
        **dim_kwargs):
    """Unflatten a flat parameter vector into the common parameter dict.

    Builds a small TF graph that reverses the flattening performed
    elsewhere, then evaluates it once on `param_vec`.

    Args:
        param_vec: 1-D array of length K*(V-1) + K*C (see below).
        n_states: number of topics K.
        n_labels: number of label dimensions C.
        n_vocabs: vocabulary size V.
        **dim_kwargs: extra dimension kwargs, ignored here.

    Returns:
        dict of numpy arrays produced by
        _unflatten_to_common_param_dict__tf_graph.
    """
    K = int(n_states)
    V = int(n_vocabs)
    C = int(n_labels)
    # Flat size: K rows of V-1 free topic-word entries plus a K x C
    # label-weight matrix.
    S = K * (V - 1) + K * C
    _param_vec = tf.placeholder(shape=[S], dtype=tf.float64)
    _param_dict = _unflatten_to_common_param_dict__tf_graph(
        _param_vec,
        n_states=n_states,
        n_labels=n_labels,
        n_vocabs=n_vocabs,
        )
    # Context manager closes the session; the original leaked one
    # tf.Session per call.
    with tf.Session() as sess:
        param_dict = sess.run(
            [_param_dict], feed_dict={_param_vec: param_vec})[0]
    return param_dict
Example 2
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: build.py    MIT License 6 votes vote down vote up
def savepb(self):
		"""Serialize the network as a standalone constant GraphDef.

		Rebuilds the net with train/verbalise flags off so it is all
		constants, then writes:
		  - built_graph/<name>.pb   : the frozen graph def
		  - built_graph/<name>.meta : JSON dump of self.meta
		"""
		darknet_pb = self.to_darknet()
		flags_pb = self.FLAGS
		flags_pb.verbalise = False
		
		flags_pb.train = False
		# rebuild another tfnet. all const.
		tfnet_pb = TFNet(flags_pb, darknet_pb)		
		tfnet_pb.sess = tf.Session(graph = tfnet_pb.graph)
		# tfnet_pb.predict() # uncomment for unit testing
		name = 'built_graph/{}.pb'.format(self.meta['name'])
		os.makedirs(os.path.dirname(name), exist_ok=True)
		# Save dump of everything in meta alongside the graph.
		with open('built_graph/{}.meta'.format(self.meta['name']), 'w') as fp:
			json.dump(self.meta, fp)
		self.say('Saving const graph def to {}'.format(name))
		graph_def = tfnet_pb.sess.graph_def
		tf.train.write_graph(graph_def,'./', name, False) 
Example 3
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_test.py    MIT License 6 votes vote down vote up
def cross_validation():
    """Run 10-fold cross validation of VAEMF over the observed ratings."""
    ratings = read_dataset()
    num_folds = 10

    # One (user, item) row per observed (non-zero) rating.
    observed = np.array(ratings.nonzero()).T
    folds = KFold(n_splits=num_folds, random_state=0)

    with tf.Session() as sess:
        model = VAEMF(sess, num_user, num_item,
                      hidden_encoder_dim=hidden_encoder_dim,
                      hidden_decoder_dim=hidden_decoder_dim,
                      latent_dim=latent_dim, output_dim=output_dim,
                      learning_rate=learning_rate, batch_size=batch_size,
                      reg_param=reg_param, one_hot=one_hot)

        for fold, (train_idx, test_idx) in enumerate(folds.split(observed)):
            print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(
                fold, num_folds, train_idx.size, test_idx.size))
            model.train(ratings, train_idx=train_idx,
                        test_idx=test_idx, n_steps=n_steps)
Example 4
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_vae_test.py    MIT License 6 votes vote down vote up
def cross_validation():
    """Run 10-fold cross validation of VAEMF (variant without one_hot)."""
    M = read_dataset()
    n_fold = 10

    # One (user, item) row per observed (non-zero) rating.
    rating_idx = np.array(M.nonzero()).T
    kf = KFold(n_splits=n_fold, random_state=0)

    with tf.Session() as sess:
        model = VAEMF(sess, num_user, num_item,
                      hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                      latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param)

        # Train one model per fold; the fold index is only used for logging.
        for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
            print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
                                                                             n_fold, train_idx.size, test_idx.size))
            model.train(M, train_idx=train_idx,
                        test_idx=test_idx, n_steps=n_steps) 
Example 5
Project: Automated-Social-Annotation   Author: acadTags   File: SVM.py    MIT License 6 votes vote down vote up
def get_embedded_words(dataX, word_embedding_final, vocab_size):
    """Look up word embeddings for token ids and average them per example.

    Args:
        dataX: int array of shape [n_examples, FLAGS.sequence_length]
            containing vocabulary indices.
        word_embedding_final: embedding matrix, shape
            [vocab_size, embed_size] -- assumed; TODO confirm at caller.
        vocab_size: unused; kept for backward compatibility.

    Returns:
        numpy array of shape [n_examples, embed_size]: the mean of the
        word embeddings over each sequence.
    """
    input_x = tf.placeholder(tf.int32, [None, FLAGS.sequence_length], name="input_x")  # X
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    # shape: [None, sequence_length, embed_size]
    embedded_words = tf.nn.embedding_lookup(word_embedding, input_x)
    # Average over the sequence axis -> [None, embed_size].
    embedded_words_reshaped = tf.reduce_mean(embedded_words, axis=1)

    # Fix: close the session when done (the original leaked it) and fetch
    # the averaged embeddings in one run instead of running the lookup
    # twice.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed_dict = {input_x: dataX[:]}
        embedded_words_mat = sess.run(embedded_words_reshaped, feed_dict)
    return embedded_words_mat
Example 6
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 6 votes vote down vote up
def create_session(config_dict=dict(), force_as_default=False):
    """Create a tf.Session configured from a dict of dotted option paths.

    Args:
        config_dict: maps dotted paths into tf.ConfigProto to values,
            e.g. {'gpu_options.allow_growth': True}.
        force_as_default: if True, permanently install the session as the
            process-wide default by entering (and never exiting) its
            as_default() context.

    Returns:
        The created tf.Session.

    NOTE(review): the force_as_default branch pokes private attributes of
    the context manager (_default_session, enforce_nesting) -- may break
    across TF versions; confirm before upgrading.
    """
    config = tf.ConfigProto()
    for key, value in config_dict.items():
        # Walk the dotted path to the parent message, then set the leaf.
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session

#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
#   tf.variables_initializer(tf.report_uninitialized_variables()).run()
Example 7
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License 6 votes vote down vote up
def initialize_session(logdir, seed=None):
    """Create a session and saver initialized from a checkpoint if found.

    Generator: yields (sess, saver) exactly once inside the session
    context -- presumably wrapped in contextlib.contextmanager by the
    caller; TODO confirm.

    Args:
        logdir: directory to search for the latest checkpoint (created if
            no checkpoint exists).
        seed: numpy RNG seed; pass 0 to skip seeding. Note the default
            None also triggers seeding, since None != 0.
    """
    if not seed ==0:
        numpy.random.seed(seed=seed)



    config = tf.ConfigProto(

    )
    # config.gpu_options.allow_growth = True
    logdir = os.path.expanduser(logdir)
    checkpoint = tf.train.latest_checkpoint(logdir)
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        if checkpoint:
            print('Load checkpoint {}.'.format(checkpoint))
            saver.restore(sess, checkpoint)
        else:
            print('Initialize new model.')
            os.makedirs(logdir, exist_ok=True)
            sess.run(tf.global_variables_initializer())
        yield sess, saver 
Example 8
Project: fbpconv_tf   Author: panakino   File: unet.py    GNU General Public License v3.0 6 votes vote down vote up
def predict(self, model_path, x_test):
        """
        Uses the model to create a prediction for the given data

        :param model_path: path to the model checkpoint to restore
        :param x_test: Data to predict on. Shape [n, nx, ny, channels]
        :returns prediction: The unet prediction Shape [n, px, py, labels] (px=nx-self.offset/2)
        """

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            # Initialize variables
            sess.run(init)

            # Restore model weights from previously saved model
            self.restore(sess, model_path)

            # The graph also expects label and keep_prob feeds, so feed a
            # dummy y of the right shape and keep_prob=1 (no dropout at
            # prediction time).
            y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
            prediction = sess.run(self.predicter, feed_dict={self.x: x_test, self.y: y_dummy, self.keep_prob: 1.})

        return prediction 
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_feature_pairing(self):
        """FeaturePairing loss is deterministic and depends on `weight`.

        NOTE(review): the numeric constants look like precomputed
        reference values for this fixture's seed -- confirm if they
        drift after dependency upgrades.
        """
        fgsm = FastGradientMethod(self.model)
        attack = lambda x: fgsm.generate(x)
        loss = FeaturePairing(self.model, weight=0.1, attack=attack)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            # Two runs with identical feeds must agree (determinism).
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([4.296023369, 2.963884830]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([4.296023369, 2.963884830]) / 2., atol=1e-6)

        # Same check with a larger pairing weight -> different expected loss.
        loss = FeaturePairing(self.model, weight=10., attack=attack)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([4.333082676, 3.00094414]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([4.333082676, 3.00094414]) / 2., atol=1e-6) 
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: enjoy-adv.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, env, dueling, noisy, fname):
        """Build an action graph in its own tf.Graph and optionally restore.

        Args:
            env: gym-style environment; its observation/action spaces
                define the network's input shape and output count.
            dueling: if True, use the dueling Q-network architecture.
            noisy: if True, enable noisy layers.
            fname: checkpoint path to restore from, or None to skip.
        """
        self.g = tf.Graph()
        self.noisy = noisy
        self.dueling = dueling
        self.env = env
        # Build the act function inside our private graph so it does not
        # collide with any default-graph state.
        with self.g.as_default():
            self.act = deepq.build_act_enjoy(
                make_obs_ph=lambda name: U.Uint8Input(
                    env.observation_space.shape, name=name),
                q_func=dueling_model if dueling else model,
                num_actions=env.action_space.n,
                noisy=noisy
            )
            self.saver = tf.train.Saver()
        self.sess = tf.Session(graph=self.g)

        if fname is not None:
            print('Loading Model...')
            self.saver.restore(self.sess, fname) 
Example 11
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    """Wrap a cleverhans attack fn into a foolbox-style attack(a) callable.

    Args:
        cleverhans_attack_fn: callable(model, session, a) returning a
            batch containing one adversarial image.
        reset: if True, discard adversarials found during the search
            before recording the final one.

    Returns:
        attack(a): runs the attack and registers the clipped adversarial
        via a.predictions; returns None.

    NOTE(review): the tf.Session created per call is never closed --
    presumably acceptable for one-shot evaluation; confirm.
    """
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            # cleverhans returns a batch of one; drop the batch axis.
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack 
Example 12
Project: neural-fingerprinting   Author: StephanZheng   File: test_runner.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def setUp(self):
        """Create `niter` variable pairs and a RunnerMultiGPU over them."""
        super(TestRunnerMultiGPU, self).setUp()
        self.sess = tf.Session()

        inputs = []
        outputs = []
        self.niter = 10
        niter = self.niter
        # A Simple graph with `niter` sub-graphs.
        with tf.variable_scope(None, 'runner'):
            for i in range(niter):
                v = tf.get_variable('v%d' % i, shape=(100, 10))
                w = tf.get_variable('w%d' % i, shape=(100, 1))

                # Identity sub-graphs: each stage's outputs are its inputs.
                inputs += [{'v': v, 'w': w}]
                outputs += [{'v': v, 'w': w}]

        self.runner = RunnerMultiGPU(inputs, outputs, sess=self.sess) 
Example 13
Project: neural-fingerprinting   Author: StephanZheng   File: test_dropout.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_drop():
    """Verify that dropout actually zeroes the activation.

    include_prob is 1e-8 rather than exactly 0 because tf.nn.dropout
    divides by include_prob, so zero (or an infinitesimal value) would
    produce NaNs. With such a tiny keep probability the single unit is
    dropped on essentially every run, so one run suffices; 1e-8 keeps
    the flakiness negligible relative to other CI noise.
    """
    model = MLP(input_shape=[1, 1],
                layers=[Dropout(name='output', include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    y = model.get_layer(x, 'output', dropout=True)
    session = tf.Session()
    result = session.run(y)
    # Subject to very rare random failure because include_prob is not exact 0.
    assert result == 0., result
Example 14
Project: neural-fingerprinting   Author: StephanZheng   File: test_dropout.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_override():
    """Check that dropout_dict overrides a layer's dropout probability."""
    # Make sure dropout_dict changes dropout probabilities successfully.

    # We would like to configure the test to deterministically drop,
    # so that the test does not need to use multiple runs.
    # However, tf.nn.dropout divides by include_prob, so zero or
    # infinitesimal include_prob causes NaNs.
    # For this test, random failure to drop will not cause the test to fail.
    # The stochastic version should not even run if everything is working
    # right.
    model = MLP(input_shape=[1, 1], layers=[Dropout(name='output',
                                                    include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    # Override: keep probability 1.0 disables dropout for 'output'.
    dropout_dict = {'output': 1.}
    y = model.get_layer(x, 'output', dropout=True, dropout_dict=dropout_dict)
    sess = tf.Session()
    y_value = sess.run(y)
    assert y_value == 1., y_value 
Example 15
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes vote down vote up
def setup_graph(self, input_audio_batch, target_phrase): 
        """Build the CTC attack graph once and return loss/decode closures.

        Args:
            input_audio_batch: [batch, samples] raw audio; only its shape
                is used to size the placeholders here.
            target_phrase: 1-D array of target token ids.

        Returns:
            (func1, func2): func1(a..f) evaluates the CTC loss;
            func2(a..f) evaluates (CTC loss, greedy decode). Both feed
            the placeholders below and capture the restored session.
        """
        batch_size = input_audio_batch.shape[0]
        # Frame count derived from 320-sample windows (minus one sample).
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        # Clip to the valid 16-bit PCM range.
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            # Restore pretrained weights into this graph; the session is
            # kept open because the returned lambdas use it.
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 16
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_seg_nowubi.py    MIT License 5 votes vote down vote up
def seg_sequence(graph, tX_char, tX_pinyin, batch_size):
    """Run batched CRF segmentation over a frozen graph (char + pinyin).

    Returns a list with one Viterbi label path per input sequence.
    """
    # Resolve the input placeholders and output tensors by node name.
    inp_char = graph.get_operation_by_name(
        "prefix/input_placeholder_char").outputs[0]
    inp_pinyin = graph.get_operation_by_name(
        "prefix/input_placeholder_pinyin").outputs[0]
    transitions = graph.get_operation_by_name(
        "prefix/transitions").outputs[0]
    label_scores = graph.get_operation_by_name(
        "prefix/Reshape_11").outputs[0]

    results = []
    with tf.Session(graph=graph) as sess:
        total = tX_char.shape[0]
        num_batches = int((total - 1) / batch_size) + 1

        for b in xrange(num_batches):
            start = b * batch_size
            end = min((b + 1) * batch_size, total)

            feed = {inp_char: tX_char[start:end],
                    inp_pinyin: tX_pinyin[start:end]}
            unary_scores, trans_mat = sess.run(
                [label_scores, transitions], feed)

            # Decode each sequence's unary scores against the shared
            # transition matrix.
            for scores in unary_scores:
                path, _ = tf.contrib.crf.viterbi_decode(scores, trans_mat)
                results.append(path)

    return results
Example 17
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_seg_nopy.py    MIT License 5 votes vote down vote up
def seg_sequence(graph, tX_char, tX_pinyin, tX_wubi, batch_size):
    """Run batched CRF segmentation over a frozen graph (char/pinyin/wubi).

    Args:
        graph: frozen tf.Graph whose nodes live under the "prefix/" scope.
        tX_char, tX_pinyin, tX_wubi: aligned id matrices -- assumed shape
            [n_sequences, seq_len]; TODO confirm at caller.
        batch_size: sequences fed per session run.

    Returns:
        list of Viterbi label paths, one per input sequence.
    """
    # Resolve input placeholders and output tensors by node name.
    inp_char_op = graph.get_operation_by_name("prefix/input_placeholder_char")
    inp_char = inp_char_op.outputs[0]

    inp_pinyin_op = graph.get_operation_by_name("prefix/input_placeholder_pinyin")
    inp_pinyin = inp_pinyin_op.outputs[0]

    inp_wubi_op = graph.get_operation_by_name("prefix/input_placeholder_wubi")
    inp_wubi = inp_wubi_op.outputs[0]

    trans_node = graph.get_operation_by_name("prefix/transitions")
    transitions = trans_node.outputs[0]

    label_node = graph.get_operation_by_name("prefix/Reshape_11")
    label_scores = label_node.outputs[0]

    results = []
    with tf.Session(graph = graph) as sess:
        totalLen = tX_char.shape[0]
        # Ceiling division: number of batches covering all sequences.
        numBatch = int((totalLen - 1) / batch_size) + 1

        for i in xrange(numBatch):
            # Clamp the last batch to the end of the data.
            endOff = (i + 1) * batch_size
            if endOff > totalLen:
                endOff = totalLen

            feed_dict = {inp_char : tX_char[i * batch_size : endOff], inp_pinyin : tX_pinyin[i * batch_size : endOff], inp_wubi : tX_wubi[i * batch_size : endOff]}
            unary_scores, transMatrix = sess.run(
              [label_scores, transitions], feed_dict)

            # Viterbi-decode each sequence against the shared transitions.
            for unary_score in unary_scores:
                viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                  unary_score, transMatrix)

                results.append(viterbi_sequence)

    return results 
Example 18
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_seg_nowubi.py    MIT License 5 votes vote down vote up
def seg_sequence(graph, tX_char, tX_pinyin, batch_size):
    """Run batched CRF segmentation over a frozen graph (char + pinyin).

    Args:
        graph: frozen tf.Graph whose nodes live under the "prefix/" scope.
        tX_char, tX_pinyin: aligned id matrices -- assumed shape
            [n_sequences, seq_len]; TODO confirm at caller.
        batch_size: sequences fed per session run.

    Returns:
        list of Viterbi label paths, one per input sequence.
    """
    # Resolve input placeholders and output tensors by node name.
    inp_char_op = graph.get_operation_by_name("prefix/input_placeholder_char")
    inp_char = inp_char_op.outputs[0]

    inp_pinyin_op = graph.get_operation_by_name("prefix/input_placeholder_pinyin")
    inp_pinyin = inp_pinyin_op.outputs[0]


    trans_node = graph.get_operation_by_name("prefix/transitions")
    transitions = trans_node.outputs[0]

    label_node = graph.get_operation_by_name("prefix/Reshape_11")
    label_scores = label_node.outputs[0]

    results = []
    with tf.Session(graph = graph) as sess:
        totalLen = tX_char.shape[0]
        # Ceiling division: number of batches covering all sequences.
        numBatch = int((totalLen - 1) / batch_size) + 1

        for i in xrange(numBatch):
            # Clamp the last batch to the end of the data.
            endOff = (i + 1) * batch_size
            if endOff > totalLen:
                endOff = totalLen

            feed_dict = {inp_char : tX_char[i * batch_size : endOff], inp_pinyin : tX_pinyin[i * batch_size : endOff]}
            unary_scores, transMatrix = sess.run(
              [label_scores, transitions], feed_dict)

            # Viterbi-decode each sequence against the shared transitions.
            for unary_score in unary_scores:
                viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                  unary_score, transMatrix)

                results.append(viterbi_sequence)

    return results 
Example 19
Project: multi-embedding-cws   Author: wangjksjtu   File: crf_seg.py    MIT License 5 votes vote down vote up
def seg_sequence(graph, tX_char, tX_pinyin, tX_wubi, batch_size):
    """Run batched CRF segmentation over a frozen graph (char/pinyin/wubi).

    Args:
        graph: frozen tf.Graph whose nodes live under the "prefix/" scope.
        tX_char, tX_pinyin, tX_wubi: aligned id matrices -- assumed shape
            [n_sequences, seq_len]; TODO confirm at caller.
        batch_size: sequences fed per session run.

    Returns:
        list of Viterbi label paths, one per input sequence.
    """
    # Resolve input placeholders and output tensors by node name.
    inp_char_op = graph.get_operation_by_name("prefix/input_placeholder_char")
    inp_char = inp_char_op.outputs[0]

    inp_pinyin_op = graph.get_operation_by_name("prefix/input_placeholder_pinyin")
    inp_pinyin = inp_pinyin_op.outputs[0]

    inp_wubi_op = graph.get_operation_by_name("prefix/input_placeholder_wubi")
    inp_wubi = inp_wubi_op.outputs[0]

    trans_node = graph.get_operation_by_name("prefix/transitions")
    transitions = trans_node.outputs[0]

    label_node = graph.get_operation_by_name("prefix/Reshape_11")
    label_scores = label_node.outputs[0]

    results = []
    with tf.Session(graph = graph) as sess:
        totalLen = tX_char.shape[0]
        # Ceiling division: number of batches covering all sequences.
        numBatch = int((totalLen - 1) / batch_size) + 1

        for i in xrange(numBatch):
            # Clamp the last batch to the end of the data.
            endOff = (i + 1) * batch_size
            if endOff > totalLen:
                endOff = totalLen

            feed_dict = {inp_char : tX_char[i * batch_size : endOff], inp_pinyin : tX_pinyin[i * batch_size : endOff], inp_wubi : tX_wubi[i * batch_size : endOff]}
            unary_scores, transMatrix = sess.run(
              [label_scores, transitions], feed_dict)

            # Viterbi-decode each sequence against the shared transitions.
            for unary_score in unary_scores:
                viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                  unary_score, transMatrix)

                results.append(viterbi_sequence)

    return results 
Example 20
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_seg.py    MIT License 5 votes vote down vote up
def seg_sequence(graph, tX_char, tX_pinyin, tX_wubi, batch_size):
    """Run batched CRF segmentation over a frozen graph (char/pinyin/wubi).

    Args:
        graph: frozen tf.Graph whose nodes live under the "prefix/" scope.
        tX_char, tX_pinyin, tX_wubi: aligned id matrices -- assumed shape
            [n_sequences, seq_len]; TODO confirm at caller.
        batch_size: sequences fed per session run.

    Returns:
        list of Viterbi label paths, one per input sequence.
    """
    # Resolve input placeholders and output tensors by node name.
    inp_char_op = graph.get_operation_by_name("prefix/input_placeholder_char")
    inp_char = inp_char_op.outputs[0]

    inp_pinyin_op = graph.get_operation_by_name("prefix/input_placeholder_pinyin")
    inp_pinyin = inp_pinyin_op.outputs[0]

    inp_wubi_op = graph.get_operation_by_name("prefix/input_placeholder_wubi")
    inp_wubi = inp_wubi_op.outputs[0]

    trans_node = graph.get_operation_by_name("prefix/transitions")
    transitions = trans_node.outputs[0]

    label_node = graph.get_operation_by_name("prefix/Reshape_11")
    label_scores = label_node.outputs[0]

    results = []
    with tf.Session(graph = graph) as sess:
        totalLen = tX_char.shape[0]
        # Ceiling division: number of batches covering all sequences.
        numBatch = int((totalLen - 1) / batch_size) + 1

        for i in xrange(numBatch):
            # Clamp the last batch to the end of the data.
            endOff = (i + 1) * batch_size
            if endOff > totalLen:
                endOff = totalLen

            feed_dict = {inp_char : tX_char[i * batch_size : endOff], inp_pinyin : tX_pinyin[i * batch_size : endOff], inp_wubi : tX_wubi[i * batch_size : endOff]}
            unary_scores, transMatrix = sess.run(
              [label_scores, transitions], feed_dict)

            # Viterbi-decode each sequence against the shared transitions.
            for unary_score in unary_scores:
                viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                  unary_score, transMatrix)

                results.append(viterbi_sequence)

    return results 
Example 21
Project: SyNEThesia   Author: RunOrVeith   File: session_management.py    MIT License 5 votes vote down vote up
def __enter__(self):
        """Open a session, summary writer and saver; return self.

        NOTE(review): tf.Session().__enter__() installs the session as
        the default without a stored exit -- presumably undone in this
        class's __exit__; confirm that pairing exists.
        """
        self.model.initialize()
        # TODO allow a debug session instead
        session = tf.Session().__enter__()
        summary_writer = tf.summary.FileWriter(self.log_dir)
        saver = tf.train.Saver(max_to_keep=self.max_saves_to_keep)
        self._session = session
        self._saver = saver
        self._summary_writer = summary_writer
        return self 
Example 22
Project: meta-transfer-learning   Author: erfaneshrati   File: run_miniimagenet.py    MIT License 5 votes vote down vote up
def main():
    """
    Load data and train a model on it.

    Parses CLI args, pins the process to the requested GPU, trains (or
    restores) a MiniImageNet model, and prints test accuracy.
    """
    args = argument_parser().parse_args()
    # Restrict TF to the GPU selected on the command line.
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)

    random.seed(args.seed)

    train_set, val_set, test_set = read_dataset(DATA_DIR)
    if args.metatransfer:
        model = MiniImageNetMetaTransferModel(args.classes, **model_kwargs(args))
    else:
        model = MiniImageNetModel(args.classes, **model_kwargs(args))
    config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        if not args.pretrained:
            print('Training...')
            train(sess, model, train_set, test_set, args.checkpoint, **train_kwargs(args))
        else:
            print('Restoring from checkpoint...')
            tf.train.Saver().restore(sess, tf.train.latest_checkpoint(args.checkpoint))

        print('Evaluating...')
        eval_kwargs = evaluate_kwargs(args)
#        print('Train accuracy: ' + str(evaluate(sess, model, train_set, **eval_kwargs)))
#        print('Validation accuracy: ' + str(evaluate(sess, model, val_set, **eval_kwargs)))
        print('Test accuracy: ' + str(evaluate(sess, model, test_set, **eval_kwargs))) 
Example 23
Project: convseg   Author: chqiwang   File: server.py    MIT License 5 votes vote down vote up
def make_app(model_dir):
    """Build the tornado Application serving the tagger under TASK.scope."""
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 1.0
    sess_config.allow_soft_placement = True
    sess_config.log_device_placement = True
    session = tf.Session(config=sess_config)
    tagger = Tagger(sess=session, model_dir=model_dir, scope=TASK.scope, batch_size=200)
    handlers = [
        (r"/", MainHandler),
        (r"/%s" % TASK.scope, TaskHandler, {'tagger': tagger}),
    ]
    return tornado.web.Application(handlers)
Example 24
Project: AutoDL   Author: tanguofu   File: test_custom_pyop.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_covert():
    """A tf.TensorShape converts to a tensor and evaluates cleanly."""
    tensor = tf.convert_to_tensor(tf.TensorShape([1, 2, 3, 4]))
    with tf.Session() as sess:
        value = sess.run(tensor)
        print(value)
Example 25
Project: AutoDL   Author: tanguofu   File: test_custom_pyop.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_pack():
    """struct_pack via tf.py_func turns an int32 vector into bytes."""
    vec = tf.placeholder(tf.int32, shape=(4,))
    packed = tf.py_func(struct_pack, [vec], tf.string)
    with tf.Session() as sess:
        out = sess.run(packed, feed_dict={vec: [1, 2, 3, 4]})
        print((type(out), out))
Example 26
Project: AutoDL   Author: tanguofu   File: test_custom_pyop.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_unpack():
    """struct_unpack via tf.py_func decodes 16 bytes into int32 values."""
    blob = tf.placeholder(tf.string, shape=())
    unpacked = tf.py_func(struct_unpack, [blob], tf.int32)

    payload = b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00'
    with tf.Session() as sess:
        result = sess.run([unpacked], feed_dict={blob: payload})
        print(result)
Example 27
Project: AutoDL   Author: tanguofu   File: learning_rate.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_PiecewiseLR():
    """Print the piecewise learning-rate schedule over 300 steps."""
    global_step = tf.train.get_or_create_global_step()
    allsample = 500
    minibatchsize = 128
    lr = PiecewiseLR(allsample, minibatchsize, global_step)
    # Op that advances the global step by one each time it is run.
    updater = tf.assign(global_step, global_step + 1)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(300):
           print(sess.run([global_step, lr]))
           sess.run([updater]) 
Example 28
Project: AutoDL   Author: tanguofu   File: inspect_graph_shape.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def infernce_graph_shape(graph_def_f, shape_str = None):
    """Load a GraphDef, optionally infer edge shapes, and render graphviz.

    Note: "infernce" is a typo in the original name, kept so existing
    callers keep working.

    Args:
        graph_def_f: path to a serialized GraphDef file.
        shape_str: comma-separated input shape, e.g. "1,224,224,3".
            If falsy, only the plain graph is rendered.

    Returns:
        The loaded graph when shape_str is falsy, otherwise None.
    """
    graph_name = graph_def_f.split('/')[-1]
   
    graph = restore.load_graph_def(graph_def_f)
    save_graph_visual(graph, graph_name)
    
    if not shape_str:
       return graph
   
    with tf.Session() as sess:
        tensor_shape = [int(i) for i in shape_str.split(',')]
        # Random values only drive shape inference; their contents are
        # irrelevant.
        tensor_value = np.random.rand(*tuple(tensor_shape))
      
        input_tensor = getStartTensor(sess.graph)
        for i in input_tensor:
           i.set_shape(tensor_shape)
        starts = {i : tensor_value for i in input_tensor}
        shape_map =  {i.name : shape_str for i in input_tensor}
        shape_map = getShapeMap(sess, starts, shape_map)

        # One graphviz node per op; edges labeled with inferred shapes.
        gv_nodes = []
        gv_edges = []     
        for o in sess.graph.get_operations():
           gv_nodes += [(o.name, {'label':getOperLabel(sess, o)})]
           gv_edges += [((i.op.name, o.name), {'label': str(shape_map.get(i.name, [i.name]))}) for i in o.inputs ]
    
        graphviz_visual(gv_nodes, gv_edges, "shape_" + graph_name)
    return None 
Example 29
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: loader.py    MIT License 5 votes vote down vote up
def load(self, ckpt, ignore):
        """Read all global variables from checkpoint `ckpt`.

        Appends [name, shape] packets to self.src_key and the variable
        values to self.vals.

        Args:
            ckpt: checkpoint path prefix ('<ckpt>.meta' must exist).
            ignore: unused here -- kept for interface compatibility.
        """
        meta = ckpt + '.meta'
        # Use a throwaway graph so the restore does not pollute the
        # default graph.
        with tf.Graph().as_default() as graph:
            with tf.Session().as_default() as sess:
                saver = tf.train.import_meta_graph(meta)
                saver.restore(sess, ckpt)
                for var in tf.global_variables():
                    # Strip the ':0' output suffix from the variable name.
                    name = var.name.split(':')[0]
                    packet = [name, var.get_shape().as_list()]
                    self.src_key += [packet]
                    self.vals += [var.eval(sess)] 
Example 30
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: build.py    MIT License 5 votes vote down vote up
def setup_meta_ops(self):
		"""Create the session and meta ops: GPU config, train op,
		summaries, variable init, saver, and optional checkpoint load."""
		cfg = dict({
			'allow_soft_placement': False,
			'log_device_placement': False
		})

		# self.FLAGS.gpu is the requested GPU memory fraction; 0 => CPU.
		utility = min(self.FLAGS.gpu, 1.)
		if utility > 0.0:
			self.say('GPU mode with {} usage'.format(utility))
			cfg['gpu_options'] = tf.GPUOptions(
				per_process_gpu_memory_fraction = utility)
			cfg['allow_soft_placement'] = True
		else: 
			self.say('Running entirely on CPU')
			cfg['device_count'] = {'GPU': 0}

		if self.FLAGS.train: self.build_train_op()
		
		if self.FLAGS.summary:
			self.summary_op = tf.summary.merge_all()
			self.writer = tf.summary.FileWriter(self.FLAGS.summary + 'train')
		
		self.sess = tf.Session(config = tf.ConfigProto(**cfg))
		self.sess.run(tf.global_variables_initializer())

		# Saver / checkpoint loading only apply when training is possible.
		if not self.ntrain: return
		self.saver = tf.train.Saver(tf.global_variables(), 
			max_to_keep = self.FLAGS.keep)
		if self.FLAGS.load != 0: self.load_from_ckpt()
		
		if self.FLAGS.summary:
			self.writer.add_graph(self.sess.graph) 
Example 31
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_test.py    MIT License 5 votes vote down vote up
def train():
    """Train VAEMF on an 80/10/10 split unless a checkpoint already exists."""
    M = read_dataset()

    # Shuffle observed-rating indices with a fixed seed for reproducibility.
    num_rating = np.count_nonzero(M)
    idx = np.arange(num_rating)
    np.random.seed(0)
    np.random.shuffle(idx)

    train_idx = idx[:int(0.8 * num_rating)]
    valid_idx = idx[int(0.8 * num_rating):int(0.9 * num_rating)]
    test_idx = idx[int(0.9 * num_rating):]

    # Result directory name encodes every hyperparameter of this run.
    result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}".format(
        hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot)
    if not os.path.exists(result_path + "/model.ckpt.index"):
        with tf.Session() as sess:
            model = VAEMF(sess, num_user, num_item,
                          hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                          latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=one_hot)
            print("Train size={0}, Validation size={1}, Test size={2}".format(
                train_idx.size, valid_idx.size, test_idx.size))
            best_mse, best_mae = model.train_test_validation(
                M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)

            print("Best MSE = {0}, best MAE = {1}".format(
                best_mse, best_mae)) 
Example 32
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_test.py    MIT License 5 votes vote down vote up
def train_test_validation():
    """Grid-search VAEMF hyperparameters on a fixed 80/10/10 split.

    For every hyperparameter combination, trains and evaluates a model
    (unless a checkpoint already exists) and appends the best MSE/MAE to
    result.csv. The default graph is reset between combinations.
    """
    M = read_dataset()

    # Deterministic shuffle of the indices of all observed (non-zero) ratings.
    num_rating = np.count_nonzero(M)
    idx = np.arange(num_rating)
    np.random.seed(0)
    np.random.shuffle(idx)

    # 80% train / 10% validation / 10% test.
    cut80, cut90 = int(0.8 * num_rating), int(0.9 * num_rating)
    train_idx = idx[:cut80]
    valid_idx = idx[cut80:cut90]
    test_idx = idx[cut90:]

    grid = itertools.product(hedims, hddims, ldims, odims, lrates, bsizes, regs, one_hots)
    for (hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim,
         learning_rate, batch_size, reg_param, one_hot) in grid:
        result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}".format(
            hidden_encoder_dim, hidden_decoder_dim, latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot)
        # Only run configurations that have not produced a checkpoint yet.
        if not os.path.exists(result_path + "/model.ckpt.index"):
            with tf.Session() as sess:
                model = VAEMF(sess, num_user, num_item,
                              hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                              latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=one_hot)
                print("Train size={0}, Validation size={1}, Test size={2}".format(
                    train_idx.size, valid_idx.size, test_idx.size))
                best_mse, best_mae = model.train_test_validation(
                    M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)

                print("Best MSE = {0}, best MAE = {1}".format(
                    best_mse, best_mae))

                with open('result.csv', 'a') as f:
                    f.write("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}\n".format(hidden_encoder_dim, hidden_decoder_dim,
                                                                               latent_dim, output_dim, learning_rate, batch_size, reg_param, one_hot, best_mse, best_mae))

        # Reset the default graph even when this combination was skipped.
        tf.reset_default_graph()
Example 33
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: movielens_vae_test.py    MIT License 5 votes vote down vote up
def train_test_validation():
    """Grid-search VAEMF (VAE variant) hyperparameters on an 85/5/10 split.

    For every hyperparameter combination, trains and evaluates a model
    (unless a checkpoint already exists) and appends the best RMSE to
    result.csv. The default graph is reset between combinations.
    """
    M = read_dataset()

    # Deterministic shuffle of the indices of all observed (non-zero) ratings.
    num_rating = np.count_nonzero(M)
    idx = np.arange(num_rating)
    np.random.seed(1)
    np.random.shuffle(idx)

    # 85% train / 5% validation / 10% test.
    cut85, cut90 = int(0.85 * num_rating), int(0.90 * num_rating)
    train_idx = idx[:cut85]
    valid_idx = idx[cut85:cut90]
    test_idx = idx[cut90:]

    grid = itertools.product(hedims, hddims, ldims, lrates, bsizes, regs, vaes)
    for (hidden_encoder_dim, hidden_decoder_dim, latent_dim,
         learning_rate, batch_size, reg_param, vae) in grid:
        result_path = "{0}_{1}_{2}_{3}_{4}_{5}_{6}".format(
            hidden_encoder_dim, hidden_decoder_dim, latent_dim, learning_rate, batch_size, reg_param, vae)
        # Only run configurations that have not produced a checkpoint yet.
        if not os.path.exists(result_path + "/model.ckpt.index"):
            # Let TF grow GPU memory on demand instead of grabbing it all.
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            with tf.Session(config=config) as sess:
                model = VAEMF(sess, num_user, num_item,
                              hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                              latent_dim=latent_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, vae=vae)
                print("Train size={0}, Validation size={1}, Test size={2}".format(
                    train_idx.size, valid_idx.size, test_idx.size))
                print(result_path)
                best_rmse = model.train_test_validation(M, train_idx=train_idx, test_idx=test_idx, valid_idx=valid_idx, n_steps=n_steps, result_path=result_path)

                print("Best MSE = {0}".format(best_rmse))

                with open('result.csv', 'a') as f:
                    f.write("{0},{1},{2},{3},{4},{5},{6},{7}\n".format(hidden_encoder_dim, hidden_decoder_dim,
                                                                               latent_dim, learning_rate, batch_size, reg_param, vae, best_rmse))

        # Reset the default graph even when this combination was skipped.
        tf.reset_default_graph()
Example 34
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 5 votes vote down vote up
def init_tf(config_dict=None):
    """Initialize TensorFlow: seed it and create a default session, once.

    Subsequent calls are no-ops if a default session already exists.

    Args:
        config_dict: optional dict of session config overrides of the form
            {'gpu_options.allow_growth': True}. Defaults to no overrides.
    """
    # BUG FIX: the original signature used a mutable default argument
    # (config_dict=dict()), which is a single dict shared across all calls.
    # Use None as the sentinel instead; behavior for callers is unchanged.
    if config_dict is None:
        config_dict = {}
    if tf.get_default_session() is None:
        tf.set_random_seed(np.random.randint(1 << 31))
        create_session(config_dict, force_as_default=True)

#----------------------------------------------------------------------------
# Create tf.Session based on config dict of the form
# {'gpu_options.allow_growth': True} 
Example 35
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self):
        """Build the FaceNet embedding graph and restore its weights."""
        from models import inception_resnet_v1  # facenet model
        self.network = inception_resnet_v1

        # uint8 image batch; values are shifted/scaled from [0, 255] to
        # approximately [-1, 1] before being fed to the network.
        self.image_batch = tf.placeholder(tf.uint8, shape=[None, 160, 160, 3], name='images')
        normalized = (tf.cast(self.image_batch, tf.float32) - 127.5) / 128.0

        prelogits, _ = self.network.inference(normalized, 1.0, False, bottleneck_layer_size=512)
        # L2-normalize so embeddings live on the unit hypersphere.
        self.embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Restore the pretrained checkpoint into a fresh session.
        self.sess = tf.Session()
        tf.train.Saver().restore(self.sess, 'models/20180402-114759/model-20180402-114759.ckpt-275')
Example 36
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self):
        """Build the MTCNN detection nets (pnet/rnet/onet) in a private graph."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Cap this process at half the GPU's memory.
            config = tf.ConfigProto(
                gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
                log_device_placement=False)
            sess = tf.Session(config=config)
            with sess.as_default():
                self.pnet, self.rnet, self.onet = FaceDet.create_mtcnn(sess, None)
Example 37
Project: jiji-with-tensorflow-example   Author: unageanu   File: model.py    MIT License 5 votes vote down vote up
def __enter__(self):
        """Context-manager entry: open a TF session and return this object.

        The session is stored on self.session for use inside the `with`
        body. NOTE(review): the matching __exit__ is not visible in this
        chunk — presumably it closes self.session; confirm.
        """
        self.session = tf.Session()
        return self
Example 38
Project: neural-fingerprinting   Author: StephanZheng   File: gen_noisy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_checkpoint(sess, model):
    """Generate adversarial examples batch-by-batch and pickle them.

    Perturbs the test set with the module-level `attack`, concatenates the
    results, and dumps {"adv_input", "adv_labels"} to a file under
    args.log_dir whose name depends on args.attack.

    Args:
        sess: active tf.Session passed through to attack.perturb.
        model: unused here; kept for interface compatibility with callers.
    """
    dataset = 'cifar'

    # Iterate over the samples batch-by-batch, collecting each adversarial
    # batch; a single concatenate at the end avoids the O(n^2) cost of
    # re-concatenating the accumulator on every iteration.
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_batches = []
    adv_y_batches = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        adv_x_batches.append(attack.perturb(x_batch, y_batch, sess))
        adv_y_batches.append(y_batch)

    if adv_x_batches:
        adv_x_samples = np.concatenate(adv_x_batches, axis=0)
        adv_y_samples = np.concatenate(adv_y_batches, axis=0)
    else:
        # Preserve original behavior for the empty case: pickle empty lists.
        adv_x_samples, adv_y_samples = [], []

    if args.attack == 'xent':
        atck = 'pgd'
        fname = os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck))
    elif args.attack == 'cw_pgd':
        atck = 'cw_pgd'
        fname = os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck))
    else:
        fname = os.path.join(args.log_dir, "custom.p")
    # BUG FIX: pickle requires a binary file handle ("wb", not "w") under
    # Python 3; a context manager also guarantees the file is closed on error.
    with open(fname, "wb") as f:
        pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
Example 39
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_checkpoint(sess, model):
    """Generate adversarial examples batch-by-batch and pickle them.

    Perturbs the test set with the module-level `attack`, concatenates the
    results, and dumps {"adv_input", "adv_labels"} to a file under
    args.log_dir whose name depends on args.attack.

    Args:
        sess: active tf.Session passed through to attack.perturb.
        model: unused here; kept for interface compatibility with callers.
    """
    dataset = 'cifar'

    # Iterate over the samples batch-by-batch, collecting each adversarial
    # batch; a single concatenate at the end avoids the O(n^2) cost of
    # re-concatenating the accumulator on every iteration.
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_batches = []
    adv_y_batches = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        adv_x_batches.append(attack.perturb(x_batch, y_batch, sess))
        adv_y_batches.append(y_batch)

    if adv_x_batches:
        adv_x_samples = np.concatenate(adv_x_batches, axis=0)
        adv_y_samples = np.concatenate(adv_y_batches, axis=0)
    else:
        # Preserve original behavior for the empty case: pickle empty lists.
        adv_x_samples, adv_y_samples = [], []

    if args.attack == 'xent':
        atck = 'pgd'
        fname = os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck))
    elif args.attack == 'cw_pgd':
        atck = 'cw_pgd'
        fname = os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck))
    else:
        fname = os.path.join(args.log_dir, "custom.p")
    # BUG FIX: pickle requires a binary file handle ("wb", not "w") under
    # Python 3; a context manager also guarantees the file is closed on error.
    with open(fname, "wb") as f:
        pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
Example 40
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_checkpoint(sess, model):
    """Generate adversarial examples batch-by-batch and pickle them.

    Perturbs the MNIST test set with the module-level `attack`, concatenates
    the results, and dumps {"adv_input", "adv_labels"} to a file under
    args.log_dir whose name depends on args.attack.

    Args:
        sess: active tf.Session passed through to attack.perturb.
        model: unused here; kept for interface compatibility with callers.
    """
    dataset = 'mnist'

    # Iterate over the samples batch-by-batch, collecting each adversarial
    # batch; a single concatenate at the end avoids the O(n^2) cost of
    # re-concatenating the accumulator on every iteration.
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_batches = []
    adv_y_batches = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        # NOTE: the original built a feed dict {model.x_input: x_batch,
        # model.y_input: y_batch} here but never used it; removed as dead code.
        adv_x_batches.append(attack.perturb(x_batch, y_batch, sess))
        adv_y_batches.append(y_batch)

    if adv_x_batches:
        adv_x_samples = np.concatenate(adv_x_batches, axis=0)
        adv_y_samples = np.concatenate(adv_y_batches, axis=0)
    else:
        # Preserve original behavior for the empty case: pickle empty lists.
        adv_x_samples, adv_y_samples = [], []

    if args.attack == 'xent':
        atck = 'pgd'
        fname = os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck))
    elif args.attack == 'cw_pgd':
        atck = 'cw_pgd'
        fname = os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck))
    else:
        fname = os.path.join(args.log_dir, "custom.p")
    # BUG FIX: pickle requires a binary file handle ("wb", not "w") under
    # Python 3; a context manager also guarantees the file is closed on error.
    with open(fname, "wb") as f:
        pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
Example 41
Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_xe(self):
        """CrossEntropy without smoothing matches the reference value and is
        deterministic across repeated evaluations."""
        loss = CrossEntropy(self.model, smoothing=0.)
        l = loss.fprop(self.x, self.y)
        feed = {self.x: self.vx, self.y: self.vy}
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict=feed)
            vl2 = sess.run(l, feed_dict=feed)
        # Mean of the two per-example cross-entropy reference values.
        expected = sum([2.210599660, 1.53666997]) / 2.
        self.assertClose(vl1, expected, atol=1e-6)
        self.assertClose(vl2, expected, atol=1e-6)
Example 42
Project: neural-fingerprinting   Author: StephanZheng   File: test_defenses.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_xe_smoothing(self):
        """CrossEntropy with label smoothing 0.1 matches the reference value
        and is deterministic across repeated evaluations."""
        loss = CrossEntropy(self.model, smoothing=0.1)
        l = loss.fprop(self.x, self.y)
        feed = {self.x: self.vx, self.y: self.vy}
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict=feed)
            vl2 = sess.run(l, feed_dict=feed)
        # Mean of the two per-example smoothed cross-entropy reference values.
        expected = sum([2.10587597, 1.47194624]) / 2.
        self.assertClose(vl1, expected, atol=1e-6)
        self.assertClose(vl2, expected, atol=1e-6)
Example 43
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a TF session and a tiny Keras model for each test."""
        from keras.models import Sequential
        from keras.layers import Dense, Activation
        import tensorflow as tf

        def dummy_model():
            # 100 -> 20 -> 10 -> softmax toy classifier.
            input_shape = (100,)
            return Sequential([Dense(20, name='l1',
                                     input_shape=input_shape),
                               Dense(10, name='l2'),
                               Activation('softmax', name='softmax')])

        self.sess = tf.Session()
        # BUG FIX: the original called self.sess.as_default() here without
        # entering the returned context manager — a no-op that never installed
        # the session as default. Removed as dead code; if a default session
        # is actually required, use `with self.sess.as_default(): ...` around
        # the code that needs it.
        self.model = dummy_model()
Example 44
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_model(self):
        """Attack must reject a `model` argument that is not callable."""
        sess = tf.Session()

        # A bare placeholder has no __call__ attribute, so constructing the
        # Attack should raise.
        with self.assertRaises(Exception) as context:
            not_a_model = tf.placeholder(tf.float32, shape=(None, 10))
            Attack(not_a_model, back='tf', sess=sess)
        self.assertTrue(context.exception)
Example 45
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_parse(self):
        """parse_params must accept an empty parameter dict."""
        sess = tf.Session()
        attack = Attack(Model('model', 10, {}), back='tf', sess=sess)
        self.assertTrue(attack.parse_params({}))
Example 46
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create the session, dummy model, and VAT attack for each test."""
        super(TestVirtualAdversarialMethod, self).setUp()

        self.sess = tf.Session()
        # BUG FIX: the original called self.sess.as_default() here without
        # entering the returned context manager — a no-op that never installed
        # the session as default. Removed as dead code; if a default session
        # is actually required, use `with self.sess.as_default(): ...`.
        self.model = DummyModel()
        self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)

        # Build the model's variables once, then initialize them.
        with tf.name_scope('dummy_model'):
            self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
        self.sess.run(tf.global_variables_initializer())
Example 47
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a fresh session, toy model, and SPSA attack per test."""
        super(TestSPSA, self).setUp()

        sess = tf.Session()
        model = SimpleModel()
        self.sess, self.model = sess, model
        self.attack = SPSA(model, sess=sess)
Example 48
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a fresh session, toy model, and BIM attack per test."""
        super(TestBasicIterativeMethod, self).setUp()

        sess = tf.Session()
        model = SimpleModel()
        self.sess, self.model = sess, model
        self.attack = BasicIterativeMethod(model, sess=sess)
Example 49
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a fresh session, toy model, and MIM attack per test."""
        super(TestMomentumIterativeMethod, self).setUp()

        sess = tf.Session()
        model = SimpleModel()
        self.sess, self.model = sess, model
        self.attack = MomentumIterativeMethod(model, sess=sess)
Example 50
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a fresh session, toy model, and C&W-L2 attack per test."""
        super(TestCarliniWagnerL2, self).setUp()

        sess = tf.Session()
        model = SimpleModel()
        self.sess, self.model = sess, model
        self.attack = CarliniWagnerL2(model, sess=sess)