Python tflearn.input_data() Examples

The following are 25 code examples of tflearn.input_data(). Each example is shown with the source file, project, and license it was taken from. You may also want to check out all available functions/classes of the module tflearn.
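Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern: input_data() creates the input placeholder that the rest of a tflearn graph is stacked on. The toy data and layer sizes below are illustrative only, not taken from any of the projects that follow.

import tflearn

# Toy XOR-style dataset, purely illustrative
X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y = [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]

net = tflearn.input_data(shape=[None, 2], name='input')  # None = batch dimension
net = tflearn.fully_connected(net, 4, activation='relu')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                         loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=100, snapshot_epoch=False)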
Example #1
Source File: models.py    From pygta5 with GNU General Public License v3.0
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    # `n` (the number of resnext blocks per stage) is a module-level constant in
    # the original project; an illustrative value is set here so the snippet
    # runs standalone.
    n = 5
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    # Note: the `lr` argument is unused in the original source; the optimizer
    # uses a fixed 0.1 learning rate with staircase decay.
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Example #2
Source File: a3c.py    From QARC with BSD 3-Clause "New" or "Revised" License
def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            _input = tf.expand_dims(inputs, -1)

            merge_net = tflearn.conv_2d(
                _input, FEATURE_NUM, KERNEL, activation='relu')
            merge_net = tflearn.conv_2d(
                merge_net, FEATURE_NUM, KERNEL, activation='relu')

            avg_net = tflearn.global_avg_pool(merge_net)
            # dense_net_0 = tflearn.fully_connected(
            #     merge_net, 64, activation='relu')
            # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
            out = tflearn.fully_connected(avg_net, 1, activation='linear')

            return inputs, out 
Example #3
Source File: vqn.py    From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(CNN_Core(tmp_network))
            else:
                _split_array.append(CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-cnn'):
            # Integer division: tf.reshape needs an int dimension (under
            # Python 3 a plain / would yield a float here)
            net = tf.reshape(merge_net, [-1, _count // DENSE_SIZE, DENSE_SIZE, 1])
            out = vgg16(net, OUTPUT_DIM)

        return out 
Example #4
Source File: cnn.py    From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(CNN_Core(tmp_network))
            else:
                _split_array.append(CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-cnn'):
            # // keeps the reshape dimension an integer under Python 3
            net = tf.reshape(
                merge_net, [-1, _count // DENSE_SIZE, DENSE_SIZE, 1])
            out = vgg16(net, OUTPUT_DIM)

        return out 
Example #5
Source File: inet.py    From QARC with BSD 3-Clause "New" or "Revised" License
def create_network(self):
        with tf.variable_scope('innovation'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in range(self.s_dim[0] - 1):  # `xrange` in the original (Python 2)
                split = tflearn.conv_1d(inputs[:, i:i + 1, :], FEATURE_NUM, KERNEL, activation='relu')
                flattened = tflearn.flatten(split)
                split_array.append(flattened)

            # dense_net = tflearn.fully_connected(inputs[:, -1:, 0:5], FEATURE_NUM, activation='relu')
            split_array.append(inputs[:, -1, 0:5])
            merge_net = tflearn.merge(split_array, 'concat')
            dense_net_0 = tflearn.fully_connected(merge_net, 64, activation='relu')

            out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='softmax')

            return inputs, out 
Example #6
Source File: a3c.py    From QARC with BSD 3-Clause "New" or "Revised" License
def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in range(self.s_dim[0] - 1):  # `xrange` in the original (Python 2)
                split = tflearn.conv_1d(inputs[:, i:i + 1, :], FEATURE_NUM, KERNEL, activation='relu')
                flattened = tflearn.flatten(split)
                split_array.append(flattened)

            dense_net = tflearn.fully_connected(inputs[:, -1:, :], FEATURE_NUM, activation='relu')
            split_array.append(dense_net)
            merge_net = tflearn.merge(split_array, 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 64, activation='relu')
            # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
            out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='softmax')

            return inputs, out 
Example #7
Source File: a3c.py    From QARC with BSD 3-Clause "New" or "Revised" License
def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            _input = tf.expand_dims(inputs, -1)

            merge_net = tflearn.conv_2d(
                _input, FEATURE_NUM, KERNEL, activation='relu')
            merge_net = tflearn.conv_2d(
                merge_net, FEATURE_NUM, KERNEL, activation='relu')

            avg_net = tflearn.global_avg_pool(merge_net)
            out = tflearn.fully_connected(
                avg_net, self.a_dim, activation='softmax')

            return inputs, out 
Example #8
Source File: test_layers.py    From FRU with MIT License
def test_feed_dict_no_None(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4], name="X_in")
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)

            def do_fit():
                m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
            self.assertRaisesRegexp(Exception, "Feed dict asks for variable named 'non_existent' but no such variable is known to exist", do_fit) 
Example #9
Source File: Model.py    From NLU with MIT License
def createDNNLayers(self, x, y):

        ###############################################################
        #
        # Sets up the DNN layers, configuration in required/confs.json
        #
        ###############################################################
        
        net = tflearn.input_data(shape=[None, len(x[0])])

        for i in range(self._confs["NLU"]['FcLayers']):
            net = tflearn.fully_connected(net, self._confs["NLU"]['FcUnits'])

        net = tflearn.fully_connected(net, len(y[0]), activation=str(self._confs["NLU"]['Activation']))

        if self._confs["NLU"]['Regression']:
            net = tflearn.regression(net)

        return net 
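The layer counts, unit sizes, and activation above are read from a JSON configuration file. A hypothetical sketch of the required/confs.json structure this code assumes — the key names come from the snippet, the values are illustrative and not taken from the NLU project:

self._confs = {
    "NLU": {
        "FcLayers": 2,            # number of hidden fully connected layers
        "FcUnits": 128,           # units per hidden layer
        "Activation": "softmax",  # activation for the output layer
        "Regression": True        # whether to append a tflearn.regression op
    }
}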
Example #10
Source File: my_seq2seq.py    From ChatBotCourse with MIT License
def model(self, feed_previous=False):
        # Build encoder_inputs and GO-prefixed decoder_inputs from the combined XY input
        input_data = tflearn.input_data(shape=[None, self.max_seq_len*2, self.word_vec_dim], dtype=tf.float32, name="XY")
        encoder_inputs = tf.slice(input_data, [0, 0, 0], [-1, self.max_seq_len, self.word_vec_dim], name="enc_in")
        decoder_inputs_tmp = tf.slice(input_data, [0, self.max_seq_len, 0], [-1, self.max_seq_len-1, self.word_vec_dim], name="dec_in_tmp")
        go_inputs = tf.ones_like(decoder_inputs_tmp)
        go_inputs = tf.slice(go_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
        # Note: tf.concat(dim, values) and tf.pack below follow the pre-1.0
        # TensorFlow API; on TF >= 1.0 use tf.concat(values, axis) and tf.stack.
        decoder_inputs = tf.concat(1, [go_inputs, decoder_inputs_tmp], name="dec_in")

        # Encoder:
        # feed encoder_inputs to the encoder; it returns an output (the first value
        # of the predicted sequence) and a state (handed on to the decoder)
        (encoder_output_tensor, states) = tflearn.lstm(encoder_inputs, self.word_vec_dim, return_state=True, scope='encoder_lstm')
        encoder_output_sequence = tf.pack([encoder_output_tensor], axis=1)

        # Decoder:
        # during prediction, each timestep's output is fed back as the next input,
        # starting with the encoder's last output as the first decoder input
        if feed_previous:
            first_dec_input = go_inputs
        else:
            first_dec_input = tf.slice(decoder_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
        decoder_output_tensor = tflearn.lstm(first_dec_input, self.word_vec_dim, initial_state=states, return_seq=False, reuse=False, scope='decoder_lstm')
        decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
        decoder_output_sequence_list = [decoder_output_tensor]
        # then feed the decoder's own output in as the next timestep's input
        for i in range(self.max_seq_len-1):
            if feed_previous:
                next_dec_input = decoder_output_sequence_single
            else:
                next_dec_input = tf.slice(decoder_inputs, [0, i+1, 0], [-1, 1, self.word_vec_dim])
            decoder_output_tensor = tflearn.lstm(next_dec_input, self.word_vec_dim, return_seq=False, reuse=True, scope='decoder_lstm')
            decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
            decoder_output_sequence_list.append(decoder_output_tensor)

        decoder_output_sequence = tf.pack(decoder_output_sequence_list, axis=1)
        real_output_sequence = tf.concat(1, [encoder_output_sequence, decoder_output_sequence])

        net = tflearn.regression(real_output_sequence, optimizer='sgd', learning_rate=0.1, loss='mean_square')
        model = tflearn.DNN(net)
        return model 
Example #11
Source File: test_models_loading_scope.py    From FRU with MIT License
def test_dnn_loading_scope(self):

        with tf.Graph().as_default():
            X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1]
            Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3]
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)
            # Testing fit and predict
            m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")

            # Testing save method
            m.save("test_dnn.tflearn")
            self.assertTrue(os.path.exists("test_dnn.tflearn.index"))

        # Testing loading, with change of variable scope (saved with no scope, now loading into scopeA)
        with tf.Graph().as_default():	# start with clear graph
            with tf.variable_scope("scopeA") as scope:
                input = tflearn.input_data(shape=[None])
                linear = tflearn.single_unit(input)
                regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                                metric='R2', learning_rate=0.01)
                m = tflearn.DNN(regression)
                def try_load():
                    m.load("test_dnn.tflearn")
                self.assertRaises(tf.errors.NotFoundError, try_load)	# fails, since names in file don't have "scopeA"

                m.load("test_dnn.tflearn", variable_name_map=("scopeA/", ""))	# succeeds, because variable names are rewritten
                res = m.predict([3.2])[0]
                self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
                self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8") 
Example #12
Source File: test_layers.py    From FRU with MIT License
def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2) 
Example #13
Source File: test_layers.py    From FRU with MIT License
def test_recurrent_layers(self):

        X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
        Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.embedding(g, input_dim=12, output_dim=4)
            g = tflearn.lstm(g, 6)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
            self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9) 
Example #14
Source File: test_layers.py    From FRU with MIT License
def test_regression_placeholder(self):
        '''
        Check that regression does not duplicate placeholders
        '''

        with tf.Graph().as_default():

            g = tflearn.input_data(shape=[None, 2])
            g_nand = tflearn.fully_connected(g, 1, activation='linear')
            with tf.name_scope("Y"):
                Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
            tflearn.regression(g_nand, optimizer='sgd',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression1",
                               name="Y")
            # for this test, just use the same default trainable_vars
            # in practice, this should be different for the two regressions
            tflearn.regression(g_nand, optimizer='adam',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression2",
                               name="Y")

            self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1) 
Example #15
Source File: test_models.py    From FRU with MIT License
def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=.5, seq_seed="12345")
            #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.tflearn")
            self.assertTrue(os.path.exists("test_seqgen.tflearn.index"))

            # Testing load method
            m.load("test_seqgen.tflearn")
            res = m.generate(10, temperature=.5, seq_seed="12345")
            # TODO: Fix test
            #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'") 
Example #16
Source File: models.py    From pygta5 with GNU General Public License v3.0
def sentnet_frames(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Example #17
Source File: qarc.py    From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(self, x):
        with tf.variable_scope('vqn'):
            inputs = tflearn.input_data(placeholder=x)
            _split_array = []

            for i in range(INPUT_SEQ):
                tmp_network = tf.reshape(
                    inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
                if i == 0:
                    _split_array.append(self.CNN_Core(tmp_network))
                else:
                    _split_array.append(self.CNN_Core(tmp_network, True))

            merge_net = tflearn.merge(_split_array, 'concat')
            merge_net = tflearn.flatten(merge_net)
            _count = merge_net.get_shape().as_list()[1]

            with tf.variable_scope('full-cnn'):
                # // keeps the reshape dimension an integer under Python 3
                net = tf.reshape(
                    merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ, 1])
                network = tflearn.conv_2d(
                    net, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
                network = tflearn.max_pool_2d(network, 3)
                network = tflearn.layers.normalization.batch_normalization(
                    network)
                network = tflearn.conv_2d(
                    network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
                network = tflearn.max_pool_2d(network, 2)
                network = tflearn.layers.normalization.batch_normalization(
                    network)
                cnn_result = tflearn.fully_connected(
                    network, DENSE_SIZE, activation='relu')

        out = tflearn.fully_connected(
            cnn_result, OUTPUT_DIM, activation='sigmoid')

        return out 
Example #18
Source File: gray.py    From QARC with BSD 3-Clause "New" or "Revised" License
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []
        _cnn_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _tmp_split, _tmp_cnn = CNN_Core(tmp_network)
            else:
                _tmp_split, _tmp_cnn = CNN_Core(tmp_network, True)
            _split_array.append(_tmp_split)
            _cnn_array.append(_tmp_cnn)

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-lstm'):
            # // keeps the reshape dimension an integer under Python 3
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ])
            net = tflearn.gru(net, HIDDEN_UNIT, return_seq=True)
            net = tflearn.gru(net, HIDDEN_UNIT, return_seq=True)
            net, alphas = attention(net, HIDDEN_UNIT)
            out = tflearn.fully_connected(
                net, OUTPUT_DIM, activation='sigmoid')

        return out, tf.stack(_cnn_array, axis=0), alphas 
Example #19
Source File: sentiment.py    From TaobaoAnalysis with MIT License
def _create_model(self):
        reset_default_graph()
        net = input_data([None, SEQUENCE_LEN])
        net = embedding(net, input_dim=len(self._vocab.vocabulary_),
                        output_dim=WORD_FEATURE_DIM)
        net = lstm(net, DOC_FEATURE_DIM, dropout=0.8)
        net = fully_connected(net, 2, activation='softmax')
        net = regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')
        return DNN(net) 
Example #20
Source File: usefulness.py    From TaobaoAnalysis with MIT License
def _create_model():
        reset_default_graph()
        net = input_data([None, 5])
        net = fully_connected(net, N_HIDDEN_UNITS, bias=True, activation='tanh')
        net = fully_connected(net, 2, activation='softmax')
        net = regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')
        return DNN(net) 
Example #21
Source File: models.py    From pygta5 with GNU General Public License v3.0
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Example #22
Source File: models.py    From pygta5 with GNU General Public License v3.0
def sentnet_v0(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    
    #network = local_response_normalization(network)
    
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)
    
    # Note: the second positional 3 is the `strides` argument (as written in the
    # original source); the other conv_3d variants in this file use the default stride.
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 256, 3, 3, activation='relu')

    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)
    
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Example #23
Source File: models.py    From pygta5 with GNU General Public License v3.0
def alexnet2(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Example #24
Source File: models.py    From pygta5 with GNU General Public License v3.0
def sentnet(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Example #25
Source File: models.py    From pygta5 with GNU General Public License v3.0
def sentnet_color(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path=model_name,
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model