Python tensorflow.placeholder() Examples

The following code examples show how to use tensorflow.placeholder(). They are drawn from open-source Python projects.

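Before the project examples, here is a minimal, self-contained sketch of the basic pattern (TensorFlow 1.x API; in TensorFlow 2.x the same call lives under tf.compat.v1 and requires eager execution to be disabled): a placeholder declares a graph input whose concrete value is supplied through feed_dict when the session runs.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3], name="x")  # batch of 3-vectors; batch size left open
y = tf.reduce_sum(x, axis=1)                               # a graph op that consumes the placeholder

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))   # feed concrete data at run time -> [6.]
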
Example 1
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License
def initialization(c2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)

    global WORDS
    WORDS = tf.Variable(c2v, name = "words")

    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

    inp = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder")
    return inp, hidden_W, hidden_b 
Example 2
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License
def initialization(c2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)

    global WORDS
    WORDS = tf.Variable(c2v, name = "words")

    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

    inp = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder")
    return inp, hidden_W, hidden_b 
Example 3
Project: SyNEThesia   Author: RunOrVeith   File: synethesia_model.py    MIT License
def _build_model(self):
        # TODO dont ignore base image
        # TODO compare difference to previous slice
        with tf.variable_scope("synethesia"):
            self.sound_feature = tf.placeholder(dtype=tf.float32, shape=[None, self.feature_dim],
                                           name="feature_input")
            img_and_sound, self.base_img = self._img_from_sound(sound_feature=self.sound_feature)
            self.generated_img = self._build_encoder(x=img_and_sound)
            self.reproduced_sound = self._build_decoder(from_img=self.generated_img)
            assert self.reproduced_sound.get_shape()[1:] == self.sound_feature.get_shape()[1:]

            loss = self._build_loss(real_img=self.base_img,
                                    generated_img=self.generated_img,
                                    real_sound=self.sound_feature,
                                    generated_sound=self.reproduced_sound)
            self._global_step, self._learning_rate, self._optimizer = self._build_optimizer(loss=loss)

            self._summary_op = self._build_summary() 
Example 4
Project: meta-transfer-learning   Author: erfaneshrati   File: models.py    MIT License
def __init__(self, num_classes, optimizer=DEFAULT_OPTIMIZER, **optim_kwargs):
        self.input_ph = tf.placeholder(tf.float32, shape=(None, 28, 28))
        out = tf.reshape(self.input_ph, (-1, 28, 28, 1))
        for _ in range(4):
            out = tf.layers.conv2d(out, 64, 3, strides=2, padding='same')
            out = tf.layers.batch_normalization(out, training=True)
            out = tf.nn.relu(out)
        out = tf.reshape(out, (-1, int(np.prod(out.get_shape()[1:]))))
        self.logits = tf.layers.dense(out, num_classes)
        self.label_ph = tf.placeholder(tf.int32, shape=(None,))
        self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_ph,
                                                                   logits=self.logits)
        self.predictions = tf.argmax(self.logits, axis=-1)
        self.minimize_op = optimizer(**optim_kwargs).minimize(self.loss)

# pylint: disable=R0903 
Example 5
Project: meta-transfer-learning   Author: erfaneshrati   File: models.py    MIT License
def __init__(self, num_classes, optimizer=DEFAULT_OPTIMIZER, **optim_kwargs):
        self.input_ph = tf.placeholder(tf.float32, shape=(None, 84, 84, 3))
        out = self.input_ph
        for _ in range(4):
            out = tf.layers.conv2d(out, 32, 3, padding='same')
            out = tf.layers.batch_normalization(out, training=True)
            out = tf.layers.max_pooling2d(out, 2, 2, padding='same')
            out = tf.nn.relu(out)
        out = tf.reshape(out, (-1, int(np.prod(out.get_shape()[1:]))))
        self.logits = tf.layers.dense(out, num_classes, name='dense_classifier')
        self.label_ph = tf.placeholder(tf.int32, shape=(None,))
        self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_ph,
                                                                   logits=self.logits)
        self.predictions = tf.argmax(self.logits, axis=-1)
        self.minimize_op_metalearner = optimizer(**optim_kwargs).minimize(self.loss)
        with tf.variable_scope('dense_classifier', reuse=True):
            w = tf.get_variable('kernel')
            b = tf.get_variable('bias')
        self.w_zero_op = w.assign(tf.zeros(w.shape))
        self.b_zero_op = b.assign(tf.zeros(b.shape)) 
Example 6
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__diffable_param_manager__tensorflow.py    MIT License
def unflatten_to_common_param_dict__tf(
        param_vec=None,
        n_states=1,
        n_labels=1,
        n_vocabs=1,
        **dim_kwargs):
    K = int(n_states)
    V = int(n_vocabs)
    C = int(n_labels) 
    S = K * (V-1) + K * C
    _param_vec = tf.placeholder(shape=[S], dtype=tf.float64)
    _param_dict = _unflatten_to_common_param_dict__tf_graph(
        _param_vec,
        n_states=n_states,
        n_labels=n_labels,
        n_vocabs=n_vocabs,
        )
    sess = tf.Session()
    param_dict = sess.run([_param_dict], feed_dict={_param_vec:param_vec})[0]
    return param_dict 
Example 7
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: build.py    MIT License
def build_forward(self):
		verbalise = self.FLAGS.verbalise

		# Placeholders
		inp_size = [None] + self.meta['inp_size']
		self.inp = tf.placeholder(tf.float32, inp_size, 'input')
		self.feed = dict() # other placeholders

		# Build the forward pass
		state = identity(self.inp)
		roof = self.num_layer - self.ntrain
		self.say(HEADER, LINE)
		for i, layer in enumerate(self.darknet.layers):
			scope = '{}-{}'.format(str(i),layer.type)
			args = [layer, state, i, roof, self.feed]
			state = op_create(*args)
			mess = state.verbalise()
			self.say(mess)
		self.say(LINE)

		self.top = state
		self.out = tf.identity(state.out, name='output') 
Example 8
Project: Automated-Social-Annotation   Author: acadTags   File: SVM.py    MIT License
def get_embedded_words(dataX,word_embedding_final,vocab_size):
    input_x = tf.placeholder(tf.int32, [None, FLAGS.sequence_length], name="input_x")  # X
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    #with tf.variable_scope("embedding", reuse=tf.AUTO_REUSE):
    #    Embedding = tf.get_variable("Embedding",shape=[vocab_size, embed_size])
    #t_assign_embedding = tf.assign(Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    embedded_words = tf.nn.embedding_lookup(word_embedding,input_x) #shape:[None,sentence_length,embed_size]
    # concatenating all embedding
    #embedded_words_reshaped = tf.reshape(embedded_words, shape=[len(testX),-1])  #
    # use averaged embedding
    embedded_words_reshaped = tf.reduce_mean(embedded_words, axis=1)
    
    #config = tf.ConfigProto(
    #    device_count = {'GPU': 0} # this enforce the program to run on CPU only.
    #)
    #sess = tf.Session(config=config)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    feed_dict = {input_x: dataX[:]}
    #sess.run(t_assign_embedding)
    embedded_words = sess.run(embedded_words, feed_dict)
    embedded_words_mat = sess.run(embedded_words_reshaped, feed_dict) 
    #print(embedded_words_mat.shape)
    return embedded_words_mat 
Example 9
Project: Neural-LP   Author: fanyangxyz   File: model.py    MIT License
def _build_input(self):
        self.tails = tf.placeholder(tf.int32, [None])
        self.heads = tf.placeholder(tf.int32, [None])
        self.targets = tf.one_hot(indices=self.heads, depth=self.num_entity)
            
        if not self.query_is_language:
            self.queries = tf.placeholder(tf.int32, [None, self.num_step])
            self.query_embedding_params = tf.Variable(self._random_uniform_unit(
                                                          self.num_query + 1, # <END> token 
                                                          self.query_embed_size), 
                                                      dtype=tf.float32)
        
            rnn_inputs = tf.nn.embedding_lookup(self.query_embedding_params, 
                                                self.queries)
        else:
            self.queries = tf.placeholder(tf.int32, [None, self.num_step, self.num_word])
            self.vocab_embedding_params = tf.Variable(self._random_uniform_unit(
                                                          self.num_vocab + 1, # <END> token
                                                          self.vocab_embed_size),
                                                      dtype=tf.float32)
            embedded_query = tf.nn.embedding_lookup(self.vocab_embedding_params, 
                                                    self.queries)
            rnn_inputs = tf.reduce_mean(embedded_query, axis=2)

        return rnn_inputs 
Example 10
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License
def autosummary(name, value):
    id = name.replace('/', '_')
    if is_tf_expression(value):
        with tf.name_scope('summary_' + id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    else: # python scalar or numpy array
        if name not in _autosummary_immediate:
            with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(tf.float32)
                update_op = _create_autosummary_var(name, update_value)
                _autosummary_immediate[name] = update_op, update_value
        update_op, update_value = _autosummary_immediate[name]
        run(update_op, {update_value: np.float32(value)})
        return value

# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph. 
Example 11
Project: fbpconv_tf   Author: panakino   File: unet.py    GNU General Public License v3.0
def __init__(self, channels=3, n_class=2, cost="euclidean", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        tf.summary.image('summary_input', get_image_summary(self.x))
        tf.summary.image('summary_output', get_image_summary(logits))
        tf.summary.image('summary_gt', get_image_summary(self.y))

        self.predicter = logits
        self.rsnr = tf.reduce_mean(rsnr_tf(self.predicter, self.y))
        #self.rsnr= tf.reduce_mean(rsnr_tf(self.predicter,self.y)) 
Example 12
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License
def test_fprop(self):
        import tensorflow as tf
        model = KerasModelWrapper(self.model)
        x = tf.placeholder(tf.float32, shape=(None, 100))
        out_dict = model.fprop(x)

        self.assertEqual(set(out_dict.keys()), set(['l1', 'l2', 'softmax']))
        # Test the dimensions of the hidden representations
        self.assertEqual(int(out_dict['l1'].shape[1]), 20)
        self.assertEqual(int(out_dict['l2'].shape[1]), 10)

        # Test the caching
        x2 = tf.placeholder(tf.float32, shape=(None, 100))
        out_dict2 = model.fprop(x2)
        self.assertEqual(set(out_dict2.keys()), set(['l1', 'l2', 'softmax']))
        self.assertEqual(int(out_dict2['l1'].shape[1]), 20) 
Example 13
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 14
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License
def initialization(c2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)

    global WORDS
    WORDS = tf.Variable(c2v, name = "words")

    inp = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder")

    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

    global cfilter
    with tf.variable_scope('CNN_Layer') as scope:
        cfilter = tf.get_variable(
            "cfilter",
            shape = [FLAGS.mrank + 1, 2 * FLAGS.num_hidden, 1, 2 * FLAGS.num_hidden],
            regularizer = tf.contrib.layers.l2_regularizer(0.0001),
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            dtype = tf.float32)

    return inp, hidden_W, hidden_b 
Example 15
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm4_crf_train.py    MIT License
def initialization(c2vPath, p2vPath, w2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    p2v = load_w2v(p2vPath, FLAGS.embedding_size)
    w2v = load_w2v(w2vPath, FLAGS.embedding_size)
    global WORDS_char
    global WORDS_pinyin
    global WORDS_wubi
    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_pinyin = tf.Variable(p2v, name = "pinyin")
    WORDS_wubi = tf.Variable(w2v, name = "wubi")
    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable( shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
                initializer = tf.truncated_normal_initializer(stddev = 0.01),
                name = "weights",
                regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_fc = tf.get_variable(
            shape = [FLAGS.embedding_size * 3, FLAGS.embedding_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_fc",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_fc = tf.Variable(tf.zeros([FLAGS.embedding_size]), name = "bias_fc")

    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_pinyin = tf.placeholder(tf.int32,
                                shape = [None, FLAGS.max_sentence_len],
                                name = "input_placeholder_pinyin")
    inp_wubi = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_wubi")
    return inp_char, inp_pinyin, inp_wubi, hidden_W, hidden_b, hidden_W_fc, hidden_b_fc 
Example 16
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License
def initialization(c2vPath, w2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    w2v = load_w2v(w2vPath, FLAGS.embedding_size)
    global WORDS_char
    global WORDS_wubi
    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_wubi = tf.Variable(w2v, name = "wubi")
    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable( shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
                initializer = tf.truncated_normal_initializer(stddev = 0.01),
                name = "weights",
                regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_fc = tf.get_variable(
            shape = [FLAGS.embedding_size * 3, FLAGS.embedding_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_fc",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_fc = tf.Variable(tf.zeros([FLAGS.embedding_size]), name = "bias_fc")

    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_wubi = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_wubi")
    return inp_char, inp_wubi, hidden_W, hidden_b, hidden_W_fc, hidden_b_fc 
Example 17
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License
def initialization(c2vPath, p2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    p2v = load_w2v(p2vPath, FLAGS.embedding_size)
    global WORDS_char
    global WORDS_pinyin
    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_pinyin = tf.Variable(p2v, name = "pinyin")
    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable( shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
                initializer = tf.truncated_normal_initializer(stddev = 0.01),
                name = "weights",
                regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_fc = tf.get_variable(
            shape = [FLAGS.embedding_size * 2, FLAGS.embedding_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_fc",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_fc = tf.Variable(tf.zeros([FLAGS.embedding_size]), name = "bias_fc")

    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_pinyin = tf.placeholder(tf.int32,
                                shape = [None, FLAGS.max_sentence_len],
                                name = "input_placeholder_pinyin")
    return inp_char, inp_pinyin, hidden_W, hidden_b, hidden_W_fc, hidden_b_fc 
Example 18
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_train.py    MIT License
def initialization(c2vPath, p2vPath, w2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    p2v = load_w2v(p2vPath, FLAGS.embedding_size)
    w2v = load_w2v(w2vPath, FLAGS.embedding_size)
    global WORDS_char
    global WORDS_pinyin
    global WORDS_wubi
    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_pinyin = tf.Variable(p2v, name = "pinyin")
    WORDS_wubi = tf.Variable(w2v, name = "wubi")
    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable( shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
                initializer = tf.truncated_normal_initializer(stddev = 0.01),
                name = "weights",
                regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_fc = tf.get_variable(
            shape = [FLAGS.embedding_size * 3, FLAGS.embedding_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_fc",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_fc = tf.Variable(tf.zeros([FLAGS.embedding_size]), name = "bias_fc")

    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_pinyin = tf.placeholder(tf.int32,
                                shape = [None, FLAGS.max_sentence_len],
                                name = "input_placeholder_pinyin")
    inp_wubi = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_wubi")
    return inp_char, inp_pinyin, inp_wubi, hidden_W, hidden_b, hidden_W_fc, hidden_b_fc 
Example 19
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License
def initialization(c2vPath, p2vPath, w2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    p2v = load_w2v(p2vPath, FLAGS.embedding_size)
    w2v = load_w2v(w2vPath, FLAGS.embedding_size)

    global WORDS_char
    global WORDS_pinyin
    global WORDS_wubi

    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_pinyin = tf.Variable(p2v, name = "pinyin")
    WORDS_wubi = tf.Variable(w2v, name = "wubi")


    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_fc = tf.get_variable(
            shape = [FLAGS.embedding_size * 3, FLAGS.embedding_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_fc",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_fc = tf.Variable(tf.zeros([FLAGS.embedding_size]), name = "bias_fc")

    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_pinyin = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_pinyin")
    inp_wubi = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_wubi")
    return inp_char, inp_pinyin, inp_wubi, hidden_W, hidden_b, hidden_W_fc, hidden_b_fc 
Example 20
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License
def initialization(c2vPath, p2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    p2v = load_w2v(p2vPath, FLAGS.embedding_size)

    global WORDS_char
    global WORDS_pinyin

    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_pinyin = tf.Variable(p2v, name = "pinyin")


    with tf.variable_scope('Softmax') as scope:
        hidden_W_char = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_char",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_char = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_pinyin = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_pinyin",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_pinyin = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")


    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_pinyin = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_pinyin")
    return inp_char, inp_pinyin, hidden_W_char, hidden_b_char, hidden_W_pinyin, \
           hidden_b_pinyin 
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_share_lstm3_crf_train.py    MIT License
def initialization(c2vPath, w2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    w2v = load_w2v(w2vPath, FLAGS.embedding_size)

    global WORDS_char
    global WORDS_wubi

    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_wubi = tf.Variable(w2v, name = "wubi")


    with tf.variable_scope('Softmax') as scope:
        hidden_W_char = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_char",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_char = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")


        hidden_W_wubi = tf.get_variable(
            shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_wubi",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_wubi = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")


    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_wubi = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_wubi")
    return inp_char, inp_wubi, hidden_W_char, hidden_b_char, hidden_W_wubi, hidden_b_wubi 
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License
def initialization(c2vPath, p2vPath, w2vPath):
    c2v = load_w2v(c2vPath, FLAGS.embedding_size)
    p2v = load_w2v(p2vPath, FLAGS.embedding_size)
    w2v = load_w2v(w2vPath, FLAGS.embedding_size)
    global WORDS_char
    global WORDS_pinyin
    global WORDS_wubi
    WORDS_char = tf.Variable(c2v, name = "words")
    WORDS_pinyin = tf.Variable(p2v, name = "pinyin")
    WORDS_wubi = tf.Variable(w2v, name = "wubi")
    with tf.variable_scope('Softmax') as scope:
        hidden_W = tf.get_variable( shape = [FLAGS.num_hidden * 2, FLAGS.num_tags],
                initializer = tf.truncated_normal_initializer(stddev = 0.01),
                name = "weights",
                regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b = tf.Variable(tf.zeros([FLAGS.num_tags]), name = "bias")

        hidden_W_fc = tf.get_variable(
            shape = [FLAGS.embedding_size * 3, FLAGS.embedding_size],
            initializer = tf.truncated_normal_initializer(stddev = 0.01),
            name = "weights_fc",
            regularizer = tf.contrib.layers.l2_regularizer(0.001))

        hidden_b_fc = tf.Variable(tf.zeros([FLAGS.embedding_size]), name = "bias_fc")

    inp_char = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_char")
    inp_pinyin = tf.placeholder(tf.int32,
                                shape = [None, FLAGS.max_sentence_len],
                                name = "input_placeholder_pinyin")
    inp_wubi = tf.placeholder(tf.int32,
                              shape = [None, FLAGS.max_sentence_len],
                              name = "input_placeholder_wubi")
    return inp_char, inp_pinyin, inp_wubi, hidden_W, hidden_b, hidden_W_fc, hidden_b_fc 
Example 23
Project: SyNEThesia   Author: RunOrVeith   File: synethesia_model.py    MIT License
def _load_base_image(self):
        return tf.placeholder(dtype=tf.float32, shape=[None, *self.img_size, 3]) 
Example 24
Project: SyNEThesia   Author: RunOrVeith   File: synethesia_model.py    MIT License
def _build_optimizer(self, loss, decay_rate=0.95, decay_steps=10000):
        with tf.variable_scope("optimizer"):
            global_step = tf.get_variable("global_step", shape=[], dtype=tf.int64, trainable=False)
            learning_rate = tf.placeholder(dtype=tf.float32, shape=[],
                                           name="learning_rate")
            decayed_learning_rate = tf.train.exponential_decay(learning_rate=learning_rate,
                                                               global_step=global_step,
                                                               decay_steps=decay_steps, decay_rate=decay_rate,
                                                               staircase=False, name="rate_decay")
            optimizer = tf.train.AdamOptimizer(decayed_learning_rate).minimize(loss, global_step=global_step,
                                                                               name="optimizer")

        return global_step, learning_rate, optimizer 
Example 25
Project: SyNEThesia   Author: RunOrVeith   File: model_skeleton.py    MIT License
def __init__(self):
        self._global_step = None
        self._learning_rate = None
        self._optimizer = None
        self._summary_op = None

        self.is_training = tf.placeholder(dtype=tf.bool, shape=[], name="is_training") 
Example 26
Project: meta-transfer-learning   Author: erfaneshrati   File: variables.py    MIT License
def __init__(self, session, variables):
        self._session = session
        self._variables = variables
        self._placeholders = [tf.placeholder(v.dtype.base_dtype, shape=v.get_shape())
                              for v in variables]
        assigns = [tf.assign(v, p) for v, p in zip(self._variables, self._placeholders)]
        self._assign_op = tf.group(*assigns) 
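A hedged usage sketch for the pattern above, assuming the class is named VariableState (a hypothetical name; only the attributes shown in the example are used): the placeholders let a saved snapshot of numpy values be written back into the tracked variables in a single session call.

state = VariableState(sess, tf.trainable_variables())
values = sess.run(state._variables)                      # snapshot the current variable values
sess.run(state._assign_op,
         feed_dict=dict(zip(state._placeholders, values)))  # restore the snapshot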
Example 27
Project: meta-transfer-learning   Author: erfaneshrati   File: models.py    MIT License
def __init__(self, num_classes, optimizer=DEFAULT_OPTIMIZER, **optim_kwargs):
        self.input_ph = tf.placeholder(tf.float32, shape=(None, 84, 84, 3))
        out = self.input_ph
        for _ in range(4):
            out = tf.layers.conv2d(out, 32, 3, padding='same')
            out = tf.layers.batch_normalization(out, training=True)
            out = tf.layers.max_pooling2d(out, 2, 2, padding='same')
            out = tf.nn.relu(out)
        out = tf.reshape(out, (-1, int(np.prod(out.get_shape()[1:]))))
        self.embedding = out
        self.logits_metalearner = tf.layers.dense(out, num_classes, kernel_initializer=tf.zeros_initializer(), bias_initializer=tf.zeros_initializer(), name='dense_metalearner')
        self.logits_classifier = tf.layers.dense(out, 64, kernel_initializer=tf.zeros_initializer(), bias_initializer=tf.zeros_initializer(), name='dense_classifier')
        self.label_ph = tf.placeholder(tf.int32, shape=(None,))
        self.real_label = tf.placeholder(tf.int32, shape=(None,))
        self.loss_metalearner = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_ph,
                                                                   logits=self.logits_metalearner)
        self.loss_classifier = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.real_label,
                                                                   logits=self.logits_classifier)
        self.predictions = tf.argmax(self.logits_metalearner, axis=-1)
        self.minimize_op_metalearner = optimizer(**optim_kwargs).minimize(self.loss_metalearner)
        self.minimize_op_classifier = optimizer(**optim_kwargs).minimize(self.loss_classifier)

        with tf.variable_scope('dense_classifier', reuse=True):
            w = tf.get_variable('kernel')
            b = tf.get_variable('bias')
        self.w_zero_op = w.assign(tf.zeros(w.shape))
        self.b_zero_op = b.assign(tf.zeros(b.shape)) 
Example 28
Project: explirefit   Author: codogogo   File: wordpair_model.py    Apache License 2.0
def define_optimization(self, loss_function, dist_reg_factor, l2_reg_factor = 0.01, learning_rate = 1e-3, loss_function_params = None):
		print("Defining loss...")
		with tf.name_scope(self.scope + "__placeholders"):
			self.input_y = tf.placeholder(tf.float64, [None], name="input_y")
		if loss_function_params:
			self.pure_loss = loss_function(self.outputs, self.input_y, loss_function_params)
		else:
			self.pure_loss = loss_function(self.outputs, self.input_y)

		if self.distance_measure == "cosine":
			cosines1 = tf.constant(1.0, dtype = tf.float64) - tf.reduce_sum(tf.multiply(tf.nn.l2_normalize(self.mlp1.outputs, dim = [1]), tf.nn.l2_normalize(self.embs_w1, dim = [1])), axis = 1)
			cosines2 = tf.constant(1.0, dtype = tf.float64) - tf.reduce_sum(tf.multiply(tf.nn.l2_normalize(self.mlp2.outputs, dim = [1]), tf.nn.l2_normalize(self.embs_w2, dim = [1])), axis = 1)
			self.distance_loss = tf.reduce_sum(cosines1) + tf.reduce_sum(cosines2)
			
		elif self.distance_measure == "euclidean": 
			self.distance_loss = tf.nn.l2_loss(tf.subtract(self.mlp1.outputs, self.embs_w1)) + tf.nn.l2_loss(tf.subtract(self.mlp2.outputs, self.embs_w2))
		
		else: 	
			raise ValueError("Unknown distance function")
		
		self.loss = self.pure_loss + dist_reg_factor * self.distance_loss
		self.loss += l2_reg_factor * self.l2_loss
			
		print("Defining optimizer...")
		self.train_step = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
		print("Done!...") 
Example 29
Project: explirefit   Author: codogogo   File: mlp_layer.py    Apache License 2.0
def define_model(self, activation = tf.nn.tanh, previous_layer = None, share_params = None):
		self.previous_layer = previous_layer
		with tf.name_scope(self.scope + self.unique_scope_addition + "__placeholders"):
			if previous_layer is None: 
				self.input = tf.placeholder(tf.float64, [None, self.input_size], name = "input_x")
			self.dropout =  tf.placeholder(tf.float64, name="dropout")
		if previous_layer is not None:
			self.input = previous_layer

		self.Ws = []
		self.biases = []
		with tf.variable_scope(self.scope + "__variables", reuse = share_params):
			for i in range(len(self.hidden_layer_sizes)): 
				self.Ws.append(tf.get_variable("W_" + str(i), shape=[(self.input_size if i == 0 else self.hidden_layer_sizes[i-1]), self.hidden_layer_sizes[i]], initializer=tf.contrib.layers.xavier_initializer(), dtype = tf.float64))
				self.biases.append(tf.get_variable("b_" + str(i), initializer=tf.constant(0.1, shape=[self.hidden_layer_sizes[i]], dtype = tf.float64), dtype = tf.float64))
				#self.Ws.append(tf.get_variable("W_" + str(i), initializer=tf.eye(self.input_size if i == 0 else self.hidden_layer_sizes[i-1], self.hidden_layer_sizes[i], dtype = tf.float64), dtype = tf.float64))
				#self.biases.append(tf.get_variable("b_" + str(i), initializer=tf.constant(0, shape=[self.hidden_layer_sizes[i]], dtype = tf.float64), dtype = tf.float64))
								

		self.layer_outputs = []
		data_runner = self.input
		for i in range(len(self.Ws)):
			data_runner = tf.nn.dropout(activation(tf.nn.xw_plus_b(data_runner, self.Ws[i], self.biases[i])), self.dropout)
			#data_runner = tf.nn.dropout(tf.nn.xw_plus_b(data_runner, self.Ws[i], self.biases[i]), self.dropout)
			self.layer_outputs.append(data_runner)
		self.outputs = self.layer_outputs[-1]
		
		self.l2_loss = 0
		for i in range(len(self.Ws)):
			self.l2_loss = self.l2_loss + tf.nn.l2_loss(self.Ws[i]) + tf.nn.l2_loss(self.biases[i]) 
Example 30
Project: explirefit   Author: codogogo   File: mlp_layer.py    Apache License 2.0
def define_loss(self, loss_function, l2_reg_factor = 0):
		self.input_y = tf.placeholder(tf.float64, [None, self.hidden_layer_sizes[-1]], name = self.scope + "__input_y")		
		self.preds = tf.nn.dropout(self.outputs, self.dropout)
		self.pure_loss = loss_function(self.preds, self.input_y)
		self.loss = self.pure_loss + l2_reg_factor * self.l2_loss 
Example 31
Project: TensorFlow-TransX   Author: thunlp   File: transE.py    MIT License
def __init__(self, config):

		entity_total = config.entity
		relation_total = config.relation
		batch_size = config.batch_size
		size = config.hidden_size
		margin = config.margin

		self.pos_h = tf.placeholder(tf.int32, [None])
		self.pos_t = tf.placeholder(tf.int32, [None])
		self.pos_r = tf.placeholder(tf.int32, [None])

		self.neg_h = tf.placeholder(tf.int32, [None])
		self.neg_t = tf.placeholder(tf.int32, [None])
		self.neg_r = tf.placeholder(tf.int32, [None])

		with tf.name_scope("embedding"):
			self.ent_embeddings = tf.get_variable(name = "ent_embedding", shape = [entity_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			self.rel_embeddings = tf.get_variable(name = "rel_embedding", shape = [relation_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
			pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
			pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
			neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
			neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
			neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)

		if config.L1_flag:
			pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
			neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
			self.predict = pos
		else:
			pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims = True)
			neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims = True)
			self.predict = pos

		with tf.name_scope("output"):
			self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0)) 
Example 32
Project: convseg   Author: chqiwang   File: tagger.py    MIT License
def build_input_graph(self, vocab_size, emb_size, word_vocab_size, word_emb_size, word_window_size):
        """
        Gather embeddings from lookup tables.
        """
        seq_ids = tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_ids')
        seq_word_ids = [tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_feature_%d_ids' % i)
                        for i in range(word_window_size)]
        embeddings = tf.get_variable('embeddings', [vocab_size, emb_size])
        embedding_output = tf.nn.embedding_lookup([embeddings], seq_ids)
        word_outputs = []
        word_embeddings = tf.get_variable('word_embeddings', [word_vocab_size, word_emb_size])
        for i in range(word_window_size):
            word_outputs.append(tf.nn.embedding_lookup([word_embeddings], seq_word_ids[i]))

        return seq_ids, seq_word_ids, tf.concat([embedding_output] + word_outputs, 2, 'inputs') 
Example 33
Project: deep-siamese-text-similarity   Author: dhwajraj   File: siamese_network_semantic.py    MIT License
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.constant(0.0, shape=[vocab_size, embedding_size]),
                trainable=trainableEmbeddings,name="W")
            self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)
        # Build the stacked-RNN output for each side of the siamese network
        with tf.name_scope("output"):
            self.out1=self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2=self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1,self.out2)),1,keep_dims=True))
            self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1),1,keep_dims=True)),tf.sqrt(tf.reduce_sum(tf.square(self.out2),1,keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Example 34
Project: deep-siamese-text-similarity   Author: dhwajraj   File: siamese_network.py    MIT License
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                trainable=True,name="W")
            self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            #self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
            self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
            #self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)

        # Build the BiRNN output for each side of the siamese network
        with tf.name_scope("output"):
            self.out1=self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2=self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1,self.out2)),1,keep_dims=True))
            self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1),1,keep_dims=True)),tf.sqrt(tf.reduce_sum(tf.square(self.out2),1,keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Example 35
Project: AutoDL   Author: tanguofu   File: test_custom_pyop.py    BSD 3-Clause "New" or "Revised" License
def test_pack():
    x = tf.placeholder(tf.int32, shape=(4,))
    shapepack = tf.py_func(struct_pack, [x], tf.string)
    with tf.Session() as sess:
       v = sess.run(shapepack, feed_dict={x:[1,2,3,4]})
       print((type(v),v)) 
Example 36
Project: AutoDL   Author: tanguofu   File: test_custom_pyop.py    BSD 3-Clause "New" or "Revised" License
def test_unpack():
    z = tf.placeholder(tf.string, shape=())
    shapeunpack = tf.py_func(struct_unpack, [z], tf.int32)
    
    v = b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00'
    with tf.Session() as sess:
       v = sess.run([shapeunpack], feed_dict={z:v})
       print(v) 
Example 37
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License
def testCreation(self):
    spec = dict(mobilenet_v2.V2_DEF)
    _, ep = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
    num_convs = len(find_ops('Conv2D'))

    # This is mostly a sanity test. No deep reason for these particular
    # constants.
    #
    # All but the first 2 and the last one have two convolutions, and there
    # is one extra conv (the logits) that is not in the spec.
    self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
    # Check that depthwise are exposed.
    for i in range(2, 17):
      self.assertIn('layer_%d/depthwise_output' % i, ep) 
Example 38
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testCreationNoClasses(self):
    spec = copy.deepcopy(mobilenet_v2.V2_DEF)
    net, ep = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
        num_classes=None)
    self.assertIs(net, ep['global_pool']) 
Example 39
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testWithSplits(self):
    spec = copy.deepcopy(mobilenet_v2.V2_DEF)
    spec['overrides'] = {
        (ops.expanded_conv,): dict(split_expansion=2),
    }
    _, _ = mobilenet.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
    num_convs = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators; the remaining 3 have one,
    # and there is one unaccounted for.
    self.assertEqual(num_convs, len(spec['spec']) * 3 - 5) 
Example 40
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testWithOutputStride8(self):
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=8,
        scope='MobilenetV2')
    self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) 
Example 41
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License
def testDivisibleBy(self):
    tf.reset_default_graph()
    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        divisible_by=16,
        min_depth=32)
    s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
    s = set(s)
    self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
                             1001], s) 
Example 42
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testDivisibleByWithArgScope(self):
    tf.reset_default_graph()
    # Verifies that depth_multiplier arg scope actually works
    # if no default min_depth is provided.
    with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
      mobilenet_v2.mobilenet(
          tf.placeholder(tf.float32, (10, 224, 224, 2)),
          conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
      s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
      s = set(s)
      self.assertSameElements(s, [32, 192, 128, 1001]) 
Example 43
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testFineGrained(self):
    tf.reset_default_graph()
    # Verifies that depth_multiplier arg scope actually works
    # if no default min_depth is provided.

    mobilenet_v2.mobilenet(
        tf.placeholder(tf.float32, (10, 224, 224, 2)),
        conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
        finegrain_classification_mode=True)
    s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
    s = set(s)
    # All convolutions will be 8->48, except for the last one.
    self.assertSameElements(s, [8, 48, 1001, 1280]) 
Example 44
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testWithOutputStride16(self):
    tf.reset_default_graph()
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=16)
    self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) 
Example 45
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testWithOutputStride8AndExplicitPadding(self):
    tf.reset_default_graph()
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=8,
        use_explicit_padding=True,
        scope='MobilenetV2')
    self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) 
Example 46
Project: AutoDL   Author: tanguofu   File: mobilenet_test.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def testWithOutputStride16AndExplicitPadding(self):
    tf.reset_default_graph()
    out, _ = mobilenet.mobilenet_base(
        tf.placeholder(tf.float32, (10, 224, 224, 16)),
        conv_defs=mobilenet_v2.V2_DEF,
        output_stride=16,
        use_explicit_padding=True)
    self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) 
Example 47
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: help.py    MIT License
def load_old_graph(self, ckpt): 
    ckpt_loader = create_loader(ckpt)
    self.say(old_graph_msg.format(ckpt))
    
    for var in tf.global_variables():
        name = var.name.split(':')[0]
        args = [name, var.get_shape()]
        val = ckpt_loader(args)
        assert val is not None, \
        'Cannot find and load {}'.format(var.name)
        shp = val.shape
        plh = tf.placeholder(tf.float32, shp)
        op = tf.assign(var, plh)
        self.sess.run(op, {plh: val}) 
Example 48
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License
def set_vars(var_to_value_dict):
    ops = []
    feed_dict = {}
    for var, value in var_to_value_dict.items():
        assert is_tf_expression(var)
        try:
            setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0')) # look for existing op
        except KeyError:
            with absolute_name_scope(var.name.split(':')[0]):
                with tf.control_dependencies(None): # ignore surrounding control_dependencies
                    setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter') # create new setter
        ops.append(setter)
        feed_dict[setter.op.inputs[1]] = value
    run(ops, feed_dict)

#----------------------------------------------------------------------------
# Autosummary creates an identity op that internally keeps track of the input
# values and automatically shows up in TensorBoard. The reported value
# represents an average over input components. The average is accumulated
# constantly over time and flushed when save_summaries() is called.
#
# Notes:
# - The output tensor must be used as an input for something else in the
#   graph. Otherwise, the autosummary op will not get executed, and the average
#   value will not get accumulated.
# - It is perfectly fine to include autosummaries with the same name in
#   several places throughout the graph, even if they are executed concurrently.
# - It is ok to also pass in a python scalar or numpy array. In this case, it
#   is added to the average immediately. 
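To make the notes above concrete, here is a brief usage sketch (hypothetical loss, optimizer, and epoch names; it assumes the autosummary() helper from Example 10): in tensor form the returned identity op must stay on the data path, while a Python scalar is accumulated immediately.

loss = autosummary('Loss/train', loss)   # identity op; must feed into something that runs
train_op = optimizer.minimize(loss)
autosummary('Progress/epoch', epoch)     # python scalar: added to the average right away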
Example 49
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License
def _init_graph(self):
        # Collect inputs.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1

        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)
        
        # Build template graph.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            assert tf.get_variable_scope().name == self.scope
            with absolute_name_scope(self.scope): # ignore surrounding name_scope
                with tf.control_dependencies(None): # ignore surrounding control_dependencies
                    self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
            
        # Collect outputs.
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
        self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1
        
        # Populate remaining fields.
        self.input_shapes   = [shape_to_list(t.shape) for t in self.input_templates]
        self.output_shapes  = [shape_to_list(t.shape) for t in self.output_templates]
        self.input_shape    = self.input_shapes[0]
        self.output_shape   = self.output_shapes[0]
        self.vars           = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
        self.trainables     = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])

    # Run initializers for all variables defined by this network. 
Example 50
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License
def __init__(self, trainable, num_concentrations, num_samples):
        self.num_concentrations = num_concentrations
        self.num_samples = num_samples
        self.trainable = trainable
        self.dropout = tf.placeholder(dtype=tf.float32)
        self.prediction_coeff = tf.placeholder(dtype=tf.float32)
        self.positivity_coeff = tf.placeholder(dtype=tf.float32)
        self.normalization_coeff = tf.placeholder(dtype=tf.float32)
        self.small_x_coeff = tf.placeholder(dtype=tf.float32)

        # the log-magnitude of X
        self.x = tf.get_variable(
            name='x',
            shape=[1],
            dtype=tf.float32,
            initializer=tf.initializers.constant(value=[1], dtype=tf.float32),
            trainable=trainable,
        )

        self.X_0 = tf.get_variable(
            name='X_0',
            shape=[num_concentrations, num_samples],
            dtype=tf.float32,
            initializer=tf.initializers.orthogonal(),
            trainable=trainable,
        )

        self.A_0 = tf.get_variable(
            name='A_0',
            shape=[num_samples, num_concentrations],
            dtype=tf.float32,
            initializer=tf.initializers.orthogonal(),
            trainable=trainable,
        )
        self.drop = tf.layers.Dropout(name='dropout_layer', rate=self.dropout)