Python tensorflow.Session() Examples

The following are 30 code examples of tensorflow.Session(), drawn from open-source projects. The source file, project, and license are noted above each example.
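Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of the two usual ways to manage a TensorFlow 1.x session: as a context manager, which closes the session automatically, and as an explicitly closed object.

import tensorflow as tf

a = tf.constant(2.0)
b = tf.placeholder(tf.float32)
total = a + b

# Preferred: the context manager closes the session on exit.
with tf.Session() as sess:
    print(sess.run(total, feed_dict={b: 3.0}))  # 5.0

# Alternative: manage the session lifetime explicitly.
sess = tf.Session()
try:
    print(sess.run(total, feed_dict={b: 4.0}))  # 6.0
finally:
    sess.close()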
Example #1
Source File: enjoy-adv.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def __init__(self, env, dueling, noisy, fname):
        self.g = tf.Graph()
        self.noisy = noisy
        self.dueling = dueling
        self.env = env
        with self.g.as_default():
            self.act = deepq.build_act_enjoy(
                make_obs_ph=lambda name: U.Uint8Input(
                    env.observation_space.shape, name=name),
                q_func=dueling_model if dueling else model,
                num_actions=env.action_space.n,
                noisy=noisy
            )
            self.saver = tf.train.Saver()
        self.sess = tf.Session(graph=self.g)

        if fname is not None:
            print('Loading Model...')
            self.saver.restore(self.sess, fname) 
Example #2
Source File: test_separator.py    From spleeter with MIT License
def test_separate(test_file, configuration, backend):
    """ Test separation from raw data. """
    with tf.Session() as sess:
        instruments = MODEL_TO_INST[configuration]
        adapter = get_default_audio_adapter()
        waveform, _ = adapter.load(test_file)
        separator = Separator(configuration, stft_backend=backend)
        prediction = separator.separate(waveform, test_file)
        assert len(prediction) == len(instruments)
        for instrument in instruments:
            assert instrument in prediction
        for instrument in instruments:
            track = prediction[instrument]
            assert waveform.shape[:-1] == track.shape[:-1]
            assert not np.allclose(waveform, track)
            for compared in instruments:
                if instrument != compared:
                    assert not np.allclose(track, prediction[compared]) 
Example #3
Source File: test_defenses.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_feature_pairing(self):
        fgsm = FastGradientMethod(self.model)
        attack = lambda x: fgsm.generate(x)
        loss = FeaturePairing(self.model, weight=0.1, attack=attack)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([4.296023369, 2.963884830]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([4.296023369, 2.963884830]) / 2., atol=1e-6)

        loss = FeaturePairing(self.model, weight=10., attack=attack)
        l = loss.fprop(self.x, self.y)
        with tf.Session() as sess:
            vl1 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
            vl2 = sess.run(l, feed_dict={self.x: self.vx, self.y: self.vy})
        self.assertClose(vl1, sum([4.333082676, 3.00094414]) / 2., atol=1e-6)
        self.assertClose(vl2, sum([4.333082676, 3.00094414]) / 2., atol=1e-6) 
Example #4
Source File: test_dropout.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_override():
    # Make sure dropout_dict changes dropout probabilities successfully

    # We would like to configure the test to deterministically drop,
    # so that the test does not need to use multiple runs.
    # However, tf.nn.dropout divides by include_prob, so zero or
    # infinitesimal include_prob causes NaNs.
    # For this test, random failure to drop will not cause the test to fail.
    # The stochastic version should not even run if everything is working
    # right.
    model = MLP(input_shape=[1, 1], layers=[Dropout(name='output',
                                                    include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    dropout_dict = {'output': 1.}
    y = model.get_layer(x, 'output', dropout=True, dropout_dict=dropout_dict)
    sess = tf.Session()
    y_value = sess.run(y)
    assert y_value == 1., y_value 
Example #5
Source File: test_dropout.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_drop():
    # Make sure dropout is activated successfully

    # We would like to configure the test to deterministically drop,
    # so that the test does not need to use multiple runs.
    # However, tf.nn.dropout divides by include_prob, so zero or
    # infinitesimal include_prob causes NaNs.
    # 1e-8 does not cause NaNs and shouldn't be a significant source
    # of test flakiness relative to dependency downloads failing, etc.
    model = MLP(input_shape=[1, 1], layers=[Dropout(name='output',
                                                    include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    y = model.get_layer(x, 'output', dropout=True)
    sess = tf.Session()
    y_value = sess.run(y)
    # Subject to very rare random failure because include_prob is not exact 0
    assert y_value == 0., y_value 
Example #6
Source File: 16_basic_kernels.py    From deep-learning-note with MIT License
def main():
    rgb = False
    if rgb:
        kernels_list = [kernels.BLUR_FILTER_RGB,
                        kernels.SHARPEN_FILTER_RGB,
                        kernels.EDGE_FILTER_RGB,
                        kernels.TOP_SOBEL_RGB,
                        kernels.EMBOSS_FILTER_RGB]
    else:
        kernels_list = [kernels.BLUR_FILTER,
                        kernels.SHARPEN_FILTER,
                        kernels.EDGE_FILTER,
                        kernels.TOP_SOBEL,
                        kernels.EMBOSS_FILTER]

    kernels_list = kernels_list[1:]
    image = read_one_image('data/images/naruto.jpeg')
    if not rgb:
        image = tf.image.rgb_to_grayscale(image)
    image = tf.expand_dims(image, 0) # make it into a batch of 1 element
    images = convolve(image, kernels_list, rgb)
    with tf.Session() as sess:
        images = sess.run(images) # convert images from tensors to float values
    show_images(images, rgb) 
Example #7
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def create_session(config_dict=dict(), force_as_default=False):
    config = tf.ConfigProto()
    for key, value in config_dict.items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        # Keep this session installed as the default session indefinitely
        # by entering (and never exiting) its as_default() context.
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session

#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
#   tf.variables_initializer(tf.report_uninitialized_variables()).run() 
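A rough sketch of the pattern that comment describes, assuming a TF 1.x default graph (this is not the original tfutil implementation, and unlike it, this version does add one initializer op to the graph):

import tensorflow as tf

def init_uninitialized_vars(sess):
    # tf.report_uninitialized_variables() yields the names (as bytes)
    # of variables that do not yet hold a value.
    uninit_names = set(sess.run(tf.report_uninitialized_variables()))
    uninit_vars = [v for v in tf.global_variables()
                   if v.name.split(':')[0].encode() in uninit_names]
    # Initialize only those variables, leaving restored ones untouched.
    sess.run(tf.variables_initializer(uninit_vars))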
Example #8
Source File: test_runner.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestRunnerMultiGPU, self).setUp()
        self.sess = tf.Session()

        inputs = []
        outputs = []
        self.niter = 10
        niter = self.niter
        # A simple graph with `niter` sub-graphs.
        with tf.variable_scope(None, 'runner'):
            for i in range(niter):
                v = tf.get_variable('v%d' % i, shape=(100, 10))
                w = tf.get_variable('w%d' % i, shape=(100, 1))

                inputs += [{'v': v, 'w': w}]
                outputs += [{'v': v, 'w': w}]

        self.runner = RunnerMultiGPU(inputs, outputs, sess=self.sess) 
Example #9
Source File: build.py    From Traffic_sign_detection_YOLO with MIT License
def savepb(self):
		"""
		Create a standalone const graph def that 
		C++	can load and run.
		"""
		darknet_pb = self.to_darknet()
		flags_pb = self.FLAGS
		flags_pb.verbalise = False
		flags_pb.train = False
		# rebuild another tfnet. all const.
		tfnet_pb = TFNet(flags_pb, darknet_pb)		
		tfnet_pb.sess = tf.Session(graph = tfnet_pb.graph)
		# tfnet_pb.predict() # uncomment for unit testing
		name = 'built_graph/{}.pb'.format(self.meta['name'])
		os.makedirs(os.path.dirname(name), exist_ok=True)
		# Save a dump of everything in meta
		with open('built_graph/{}.meta'.format(self.meta['name']), 'w') as fp:
			json.dump(self.meta, fp)
		self.say('Saving const graph def to {}'.format(name))
		graph_def = tfnet_pb.sess.graph_def
		tf.train.write_graph(graph_def,'./', name, False) 
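As a companion sketch, here is one way a frozen .pb written like this could be loaded back in Python; the helper name and file path are illustrative, not taken from the project:

import tensorflow as tf

def load_frozen_graph(pb_path):
    # Parse the binary GraphDef written by tf.train.write_graph(..., as_text=False).
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    # Import into a fresh graph so node names keep their original form.
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph

# Illustrative usage (path is hypothetical):
# sess = tf.Session(graph=load_frozen_graph('built_graph/tiny-yolo.pb'))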
Example #10
Source File: utils.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack 
Example #11
Source File: lenet_mnist_eval.py    From deep-learning-note with MIT License
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [
            mnist_train.BATCH_SIZE,
            mnist_inference.IMAGE_SIZE,
            mnist_inference.IMAGE_SIZE,
            mnist_inference.NUM_CHANNELS], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print("No Checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS) 
Example #12
Source File: test_serial.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_save_and_load_var():
    """
    Tests that we can save and load a PicklableVariable with joblib
    """
    sess = tf.Session()
    with sess.as_default():
        x = np.ones(1)
        xv = PicklableVariable(x)
        xv.var.initializer.run()
        save("/tmp/var.joblib", xv)
        sess.run(tf.assign(xv.var, np.ones(1) * 2))
        new_xv = load("/tmp/var.joblib")
        assert np.allclose(sess.run(xv.var), np.ones(1) * 2)
        assert np.allclose(sess.run(new_xv.var), np.ones(1)) 
Example #13
Source File: 4_simulate_sin.py    From deep-learning-note with MIT License
def train():
    # learning rate
    lr = 0.01

    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)

    y_ = inference(x)
    # loss function
    loss = tf.square(y_ - y)

    # stochastic gradient descent
    opt = tf.train.GradientDescentOptimizer(lr)
    train_op = opt.minimize(loss)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        print("start training")
        for i in range(1000000):
            train_x, train_y = get_train_data()
            sess.run(train_op, feed_dict={x: train_x, y: train_y})

            if i % 10000 == 0:
                times = int(i / 10000)
                test_x_ndarray = np.arange(0, 2 * np.pi, 0.01)
                test_y_ndarray = np.zeros([len(test_x_ndarray)])
                ind = 0
                for test_x in test_x_ndarray:
                    test_y = sess.run(y_, feed_dict={x: test_x, y: 1})
                    np.put(test_y_ndarray, ind, test_y)
                    ind += 1
                draw_correct_line()
                pylab.plot(test_x_ndarray, test_y_ndarray, '--', label=str(times) + 'times')
                pylab.show() 
Example #14
Source File: 1_basic_linear.py    From deep-learning-note with MIT License
def main():
    train_x = tf.placeholder(tf.float32)
    train_label = tf.placeholder(tf.float32)
    test_x = tf.placeholder(tf.float32)
    test_label = tf.placeholder(tf.float32)

    with tf.variable_scope("inference"):
        train_y = inference(train_x)
        tf.get_variable_scope().reuse_variables()
        test_y = inference(test_x)

    train_loss = tf.square(train_y - train_label)
    test_loss = tf.square(test_y - test_label)
    opt = tf.train.GradientDescentOptimizer(0.002)
    train_op = opt.minimize(train_loss)

    init = tf.global_variables_initializer()

    train_data_x, train_data_label = get_data(1000)
    test_data_x, test_data_label = get_data(1)

    with tf.Session() as sess:
        sess.run(init)
        for i in range(1000):
            sess.run(train_op, feed_dict={train_x: train_data_x[i],
                                          train_label: train_data_label[i]})
            if i % 10 == 0:
                test_loss_value = sess.run(test_loss, feed_dict={test_x:test_data_x[0],
                                                                 test_label:test_data_label[0]})
                print("step %d eval loss is %.3f" % (i, test_loss_value)) 
Example #15
Source File: names.py    From DOTA_models with Apache License 2.0
def train(data_dir, checkpoint_path, config):
    """Trains the model with the given data

    Args:
        data_dir: path to the data for the model (see data_utils for data
            format)
        checkpoint_path: the path to save the trained model checkpoints
        config: one of the above configs that specify the model and how it
            should be run and trained
    Returns:
        None
    """
    # Prepare Name data.
    print("Reading Name data in %s" % data_dir)
    names, counts = data_utils.read_names(data_dir)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = NamignizerModel(is_training=True, config=config)

        tf.global_variables_initializer().run()

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, names, counts, config.epoch_size, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" %
                  (i + 1, train_perplexity))

            m.saver.save(session, checkpoint_path, global_step=i) 
Example #16
Source File: names.py    From DOTA_models with Apache License 2.0
def namignize(names, checkpoint_path, config):
    """Recognizes names and prints the Perplexity of the model for each names
    in the list

    Args:
        names: a list of names in the model format
        checkpoint_path: the path to restore the trained model from, should not
            include the model name, just the path to
        config: one of the above configs that specify the model and how it
            should be run and trained
    Returns:
        None
    """
    with tf.Graph().as_default(), tf.Session() as session:

        with tf.variable_scope("model"):
            m = NamignizerModel(is_training=False, config=config)

        m.saver.restore(session, checkpoint_path)

        for name in names:
            x, y = data_utils.name_to_batch(name, m.batch_size, m.num_steps)

            cost, loss, _ = session.run([m.cost, m.loss, tf.no_op()],
                                  {m.input_data: x,
                                   m.targets: y,
                                   m.weights: np.concatenate((
                                       np.ones(len(name)), np.zeros(m.batch_size * m.num_steps - len(name))))})

            print("Name {} gives us a perplexity of {}".format(
                name, np.exp(cost))) 
Example #17
Source File: mnist_train.py    From deep-learning-note with MIT License
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TF Saver for checkpoint persistence
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                # 保存当前模型
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step) 
Example #18
Source File: mnist_eval.py    From deep-learning-note with MIT License
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print("No Checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS) 
Example #19
Source File: names.py    From DOTA_models with Apache License 2.0
def namignator(checkpoint_path, config):
    """Generates names randomly according to a given model

    Args:
        checkpoint_path: the path to restore the trained model from, should not
            include the model name, just the path to
        config: one of the above configs that specify the model and how it
            should be run and trained
    Returns:
        None
    """
    # mutate the config to become a name generator config
    config.num_steps = 1
    config.batch_size = 1

    with tf.Graph().as_default(), tf.Session() as session:

        with tf.variable_scope("model"):
            m = NamignizerModel(is_training=False, config=config)

        m.saver.restore(session, checkpoint_path)

        activations, final_state, _ = session.run([m.activations, m.final_state, tf.no_op()],
                                                  {m.input_data: np.zeros((1, 1)),
                                                   m.targets: np.zeros((1, 1)),
                                                   m.weights: np.ones(1)})

        # sample from our softmax activations
        next_letter = np.random.choice(27, p=activations[0])
        name = [next_letter]
        while next_letter != 0:
            activations, final_state, _ = session.run([m.activations, m.final_state, tf.no_op()],
                                                      {m.input_data: [[next_letter]],
                                                       m.targets: np.zeros((1, 1)),
                                                       m.initial_state: final_state,
                                                       m.weights: np.ones(1)})

            next_letter = np.random.choice(27, p=activations[0])
            name += [next_letter]

        print(list(map(lambda x: chr(x + 96), name)))  # list(...) so the characters print under Python 3
Example #20
Source File: download_and_convert_mnist.py    From DOTA_models with Apache License 2.0
def _add_to_tfrecord(data_filename, labels_filename, num_images,
                     tfrecord_writer):
  """Loads data from the binary MNIST files and writes files to a TFRecord.

  Args:
    data_filename: The filename of the MNIST images.
    labels_filename: The filename of the MNIST labels.
    num_images: The number of images in the dataset.
    tfrecord_writer: The TFRecord writer to use for writing.
  """
  images = _extract_images(data_filename, num_images)
  labels = _extract_labels(labels_filename, num_images)

  shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
  with tf.Graph().as_default():
    image = tf.placeholder(dtype=tf.uint8, shape=shape)
    encoded_png = tf.image.encode_png(image)

    with tf.Session('') as sess:
      for j in range(num_images):
        sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
        sys.stdout.flush()

        png_string = sess.run(encoded_png, feed_dict={image: images[j]})

        example = dataset_utils.image_to_tfexample(
            png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
        tfrecord_writer.write(example.SerializeToString()) 
Example #21
Source File: seq2seq_attention_decode.py    From DOTA_models with Apache License 2.0
def DecodeLoop(self):
    """Decoding loop for long running process."""
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    step = 0
    while step < FLAGS.max_decode_steps:
      time.sleep(DECODE_LOOP_DELAY_SECS)
      if not self._Decode(self._saver, sess):
        continue
      step += 1 
Example #22
Source File: 11_w2v_visual.py    From deep-learning-note with MIT License
def visualize(self, visual_fld, num_visualize):
        """ run "'tensorboard --logdir='visualization'" to see the embeddings """

        # create the list of num_variable most common words to visualize
        w2v_utils.most_common_words(visual_fld, num_visualize)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('data/checkpoints/checkpoint'))

            # if that checkpoint exists, restore from checkpoint
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            final_embed_matrix = sess.run(self.embed_matrix)

            # you have to store embeddings in a new variable
            embedding_var = tf.Variable(final_embed_matrix[:num_visualize], name='embedding')
            sess.run(embedding_var.initializer)

            config = projector.ProjectorConfig()
            summary_writer = tf.summary.FileWriter(visual_fld)

            # add embedding to the config file
            embedding = config.embeddings.add()
            embedding.tensor_name = embedding_var.name

            # link this tensor to its metadata file, in this case the first NUM_VISUALIZE words of vocab
            embedding.metadata_path = 'vocab_' + str(num_visualize) + '.tsv'

            # saves a configuration file that TensorBoard will read during startup.
            projector.visualize_embeddings(summary_writer, config)
            saver_embed = tf.train.Saver([embedding_var])
            saver_embed.save(sess, os.path.join(visual_fld, 'model.ckpt'), 1) 
Example #23
Source File: test_separator.py    From spleeter with MIT License
def test_separate_to_file(test_file, configuration, backend):
    """ Test file based separation. """
    with tf.Session() as sess:
        instruments = MODEL_TO_INST[configuration]
        separator = Separator(configuration, stft_backend=backend)
        name = splitext(basename(test_file))[0]
        with TemporaryDirectory() as directory:
            separator.separate_to_file(
                test_file,
                directory)
            for instrument in instruments:
                assert exists(join(
                    directory,
                    '{}/{}.wav'.format(name, instrument))) 
Example #24
Source File: separator.py    From spleeter with MIT License
def _get_session(self):
        if self._session is None:
            saver = tf.train.Saver()
            latest_checkpoint = tf.train.latest_checkpoint(get_default_model_dir(self._params['model_dir']))
            self._session = tf.Session()
            saver.restore(self._session, latest_checkpoint)
        return self._session 
Example #25
Source File: audio_transfer_learning.py    From sklearn-audio-transfer-learning with ISC License
def extract_vggish_features(paths, path2gt, model): 
    """Extracts VGGish features and their corresponding ground_truth and identifiers (the path).

       VGGish features are extracted from non-overlapping audio patches of 0.96 seconds, 
       where each audio patch covers 64 mel bands and 96 frames of 10 ms each.

       We repeat ground_truth and identifiers to fit the number of extracted VGGish features.
    """
    # 1) Extract log-mel spectrograms
    first_audio = True
    for p in paths:
        if first_audio:
            input_data = vggish_input.wavfile_to_examples(config['audio_folder'] + p)
            ground_truth = np.repeat(path2gt[p], input_data.shape[0], axis=0)
            identifiers = np.repeat(p, input_data.shape[0], axis=0)
            first_audio = False
        else:
            tmp_in = vggish_input.wavfile_to_examples(config['audio_folder'] + p)
            input_data = np.concatenate((input_data, tmp_in), axis=0)
            tmp_gt = np.repeat(path2gt[p], tmp_in.shape[0], axis=0)
            ground_truth = np.concatenate((ground_truth, tmp_gt), axis=0)
            tmp_id = np.repeat(p, tmp_in.shape[0], axis=0)
            identifiers = np.concatenate((identifiers, tmp_id), axis=0)

    # 2) Load Tensorflow model to extract VGGish features
    with tf.Graph().as_default(), tf.Session() as sess:
        vggish_slim.define_vggish_slim(training=False)
        vggish_slim.load_vggish_slim_checkpoint(sess, 'vggish_model.ckpt')
        features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
        embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
        extracted_feat = sess.run([embedding_tensor], feed_dict={features_tensor: input_data})
        feature = np.squeeze(np.asarray(extracted_feat))

    return [feature, ground_truth, identifiers] 
Example #26
Source File: DeepFM.py    From tensorflow-DeepFM with MIT License
def _init_session(self):
        config = tf.ConfigProto(device_count={"GPU": 0})  # "GPU" must be uppercase for the device count to disable GPU use
        config.gpu_options.allow_growth = True
        return tf.Session(config=config) 
Example #27
Source File: run_audio_attack.py    From Black-Box-Audio with MIT License 5 votes vote down vote up
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320  # number of 320-sample frames; used below as the CTC sequence length
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example #28
Source File: test_utils_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def setUp(self):
        super(TestUtils, self).setUp()

        self.sess = tf.Session() 
Example #29
Source File: trainer.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _init_session(self):
        # Set TF random seed to improve reproducibility
        self.rng = np.random.RandomState([2017, 8, 30])
        tf.set_random_seed(1234)

        # Create TF session
        self.sess = tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True))

        # Object used to keep track of (and return) key accuracies
        if self.hparams.save:
            self.writer = tf.summary.FileWriter(self.hparams.save_dir,
                                                flush_secs=10)
        else:
            self.writer = None 
Example #30
Source File: attack_model_featadv.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def main(argv):
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    input_shape = [FLAGS.batch_size, 224, 224, 3]
    x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    print("Input shape:")
    print(input_shape)

    model = make_imagenet_cnn(input_shape)
    attack = FastFeatureAdversaries(model)
    attack_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.,
                     'nb_iter': FLAGS.nb_iter, 'eps_iter': 0.01,
                     'layer': FLAGS.layer}
    x_adv = attack.generate(x_src, x_guide, **attack_params)
    h_adv = model.fprop(x_adv)[FLAGS.layer]
    h_src = model.fprop(x_src)[FLAGS.layer]
    h_guide = model.fprop(x_guide)[FLAGS.layer]

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        ha, hs, hg, xa, xs, xg = sess.run(
            [h_adv, h_src, h_guide, x_adv, x_src, x_guide])

        print("L2 distance between source and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hs-ha)*(hs-ha)).sum()))
        print("L2 distance between guide and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hg-ha)*(hg-ha)).sum()))
        print("L2 distance between source and guide `%s`: %.4f" %
              (FLAGS.layer, ((hg-hs)*(hg-hs)).sum()))
        print("Maximum perturbation: %.4f" % np.abs((xa-xs)).max())
        print("Original features: ")
        print(hs[:10, :10])
        print("Adversarial features: ")
        print(ha[:10, :10])