Python net.Net() Examples

The following are 29 code examples of net.Net(), collected from open-source projects. The source file and project for each example are noted above it. You may also want to check out all available functions and classes of the module net.
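Most of the ConvNetPy examples below share one pattern: build a list of layer definitions, pass it to Net(), then hand the network to a Trainer. The following is a minimal sketch of that pattern; the imports and layer sizes are illustrative assumptions inferred from the examples below, not an excerpt from any one project.

from net import Net            # network class, as used in the ConvNetPy examples below
from trainers import Trainer   # training helper, as used in the ConvNetPy examples below

# Layers are plain dicts: the input layer fixes the data shape and the
# final layer selects the loss ('softmax' for classification,
# 'regression' for reconstruction).
layers = []
layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 4})
layers.append({'type': 'fc', 'num_neurons': 20, 'activation': 'sigmoid'})
layers.append({'type': 'softmax', 'num_classes': 2})

network = Net(layers)
trainer = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})

# Training then iterates over (input, label) pairs:
#   stats = trainer.train(x, y)   # stats includes 'loss', 'accuracy', etc.

Other projects construct Net() differently: the MXNet style-transfer examples call net.Net(ngf=args.ngf) and load pretrained parameters, while the TensorFlow and PyTorch examples pass framework-specific arguments. See the individual examples for details.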
Example #1
Source File: topics.py    From ConvNetPy with MIT License
def start():
    global training_data, network, t, N

    training_data = load_data()
    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': N})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'regression', 'num_neurons': N})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 4, 'l2_decay': 0.0001})
Example #2
Source File: main.py    From MXNet-Gluon-Style-Transfer with MIT License
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_params(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Example #3
Source File: worker.py    From reinforce_py with MIT License
def __init__(self, worker_id, env, global_ep, args):
        self.name = 'worker_' + str(worker_id)
        self.env = env
        self.global_ep = global_ep
        self.args = args
        self.learning_rate = 1e-4
        self.gamma = 0.99
        self.trainer = tf.train.AdamOptimizer(self.learning_rate)

        # create local copy of AC network
        self.local_net = Net(self.env.state_dim,
                             self.env.action_dim,
                             scope=self.name,
                             trainer=self.trainer)

        self.update_local_op = self._update_local_params() 
Example #4
Source File: main.py    From SNIPER-mxnet with Apache License 2.0
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_params(args.model, ctx=ctx)
    # forward
    style_model.setTarget(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Example #5
Source File: num2img.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, n, t

    training_data = load_data()
    testing_data = load_data(False)

    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 10})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'regression', 'num_neurons': 28 * 28})
    print 'Layers made...'

    n = Net(layers)
    print 'Net made...'
    print n

    t = Trainer(n, {'method': 'sgd', 'batch_size': 20, 'l2_decay': 0.001})
    print 'Trainer made...'
    print t 
Example #6
Source File: darkencoder.py    From ConvNetPy with MIT License
def train2():
    global training_data2, n2, t2

    layers = []
    layers.append({'type': 'input', 'out_sx': 28, 'out_sy': 28, 'out_depth': 1})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': 10})
    print 'Layers made...'

    n2 = Net(layers)
    print 'Net made...'
    print n2

    t2 = Trainer(n2, {'method': 'adadelta', 'batch_size': 20, 'l2_decay': 0.001})
    print 'Trainer made...' 

    print 'In training of smaller net...'
    print 'k', 'time\t\t  ', 'loss\t  ', 'training accuracy'
    print '----------------------------------------------------'
    try:
        for x, y in training_data2: 
            stats = t2.train(x, y)
            print stats['k'], stats['time'], stats['loss'], stats['accuracy']
    except:  # hit control-c or other
        return 
Example #7
Source File: similarity.py    From ConvNetPy with MIT License
def start():
    global training_data, network, t, N

    training_data = load_data()
    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': N})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'regression', 'num_neurons': N})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 4, 'l2_decay': 0.0001})
Example #8
Source File: next_word.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, network, t, N

    all_data = load_data()
    shuffle(all_data)
    size = int(len(all_data) * 0.1)
    training_data, testing_data = all_data[size:], all_data[:size]
    print 'Data loaded, size: {}...'.format(len(all_data))

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': N})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': N})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})
Example #9
Source File: autoencoder_vis.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, n, t

    training_data = load_data()
    testing_data = load_data(False)

    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 28, 'out_sy': 28, 'out_depth': 1})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'tanh'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'tanh'})
    layers.append({'type': 'fc', 'num_neurons': 2, 'activation': 'tanh'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'tanh'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'tanh'})
    layers.append({'type': 'regression', 'num_neurons': 28 * 28})
    print 'Layers made...'

    n = Net(layers)
    print 'Net made...'
    print n

    t = Trainer(n, {'method': 'adadelta', 'learning_rate': 1.0, 'batch_size': 50, 'l2_decay': 0.001, 'l1_decay': 0.001})
    print 'Trainer made...' 
Example #10
Source File: main.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_parameters(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Example #11
Source File: transforming_autoencoder.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, n, t

    training_data = load_data()
    testing_data = load_data(False)

    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 28, 'out_sy': 28, 'out_depth': 1})
    layers.append({
        'type': 'capsule', 'num_neurons': 30, 
        'num_recog': 3, 'num_gen': 4, 'num_pose': 2,
        'dx': 1, 'dy': 0
    })
    layers.append({'type': 'regression', 'num_neurons': 28 * 28})
    print 'Layers made...'

    n = Net(layers)

    print 'Net made...'
    print n

    t = Trainer(n, {'method': 'sgd', 'batch_size': 20, 'l2_decay': 0.001})
    print 'Trainer made...' 
Example #12
Source File: dialogue.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, network, t, N, labels

    data = load_data()
    shuffle(data)
    size = int(len(data) * 0.01)
    training_data, testing_data = data[size:], data[:size]
    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': N})
    layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': len(labels)})
    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})
Example #13
Source File: faces.py    From ConvNetPy with MIT License
def start():
    global network, t

    layers = []
    layers.append({'type': 'input', 'out_sx': 30, 'out_sy': 30, 'out_depth': 1})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': 7})
    print 'Layers made...'

    network = Net(layers)
    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 20, 'l2_decay': 0.001})
    print 'Trainer made...'
    print t 
Example #14
Source File: titanic.py    From ConvNetPy with MIT License
def start():
    global network, sgd

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 7})
    #layers.append({'type': 'fc', 'num_neurons': 30, 'activation': 'relu'})
    #layers.append({'type': 'fc', 'num_neurons': 30, 'activation': 'relu'})
    layers.append({'type': 'softmax', 'num_classes': 2}) #svm works too
    print 'Layers made...'

    network = Net(layers)
    print 'Net made...'
    print network

    sgd = Trainer(network, {'momentum': 0.2, 'l2_decay': 0.001})
    print 'Trainer made...'
    print sgd 
Example #15
Source File: next_letter.py    From ConvNetPy with MIT License
def start():
    global training_data, n, t

    training_data = load_data()

    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 255})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': 255})

    print 'Layers made...'

    n = Net(layers)

    print 'Net made...'
    print n

    t = Trainer(n, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})

    print 'Trainer made...' 
Example #16
Source File: sample.py    From Text-Generate-RNN with Apache License 2.0
def __init__(self):
        super(Predictor, self).__init__()
        num_units = 512
        num_layer = 2
        batch_size = 1
        data_dir = 'data/'
        input_file = 'poetry.txt'
        vocab_file = 'vocab.pkl'
        tensor_file = 'tensor.npy'

        self.data = Data(data_dir, input_file, vocab_file, tensor_file, 
                        is_train=False, batch_size=batch_size)
        self.model = Net(self.data, num_units, num_layer, batch_size)
        self.sess = tf.Session()

        saver = tf.train.Saver(tf.global_variables())
        saver.restore(self.sess, 'model/model')
        print('Load model done.' + '\n') 
Example #17
Source File: main.py    From training_results_v0.6 with Apache License 2.0
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_parameters(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) 
Example #18
Source File: next_word_embeddings.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, network, t, N

    all_data = load_data()
    shuffle(all_data)
    size = int(len(all_data) * 0.1)
    training_data, testing_data = all_data[size:], all_data[:size]
    print 'Data loaded, size: {}...'.format(len(all_data))

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 240})
    
    layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    
    #layers.append({'type': 'conv', 'sx': 1, 'filters': 240, 'pad': 0}) #lookup table like
    #layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'tanh', 'drop_prob': 0.5})
    #layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'tanh', 'drop_prob': 0.5})
    
    layers.append({'type': 'softmax', 'num_classes': N})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})
Example #19
Source File: iris.py    From ConvNetPy with MIT License
def start():
    global network, sgd

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 4})
    layers.append({'type': 'softmax', 'num_classes': 3}) #svm works too
    print 'Layers made...'

    network = Net(layers)
    print 'Net made...'
    print network

    sgd = Trainer(network, {'momentum': 0.1, 'l2_decay': 0.001})
    print 'Trainer made...'
    print sgd 
Example #20
Source File: autoencoder.py    From ConvNetPy with MIT License
def start(conv, crop, gray):
    global training_data, testing_data, network, t

    training_data = load_data(crop, gray)
    testing_data = load_data(crop, gray, False)

    print 'Data loaded...'

    layers = []

    dim = 24 if crop else 32
    depth = 1 if gray else 3
    layers.append({'type': 'input', 'out_sx': dim, 'out_sy': dim, 'out_depth': depth})

    if conv:
        layers.append({'type': 'conv', 'sx': 5, 'filters': 16, 'stride': 1, 'pad': 2, 'activation': 'relu'}) #, 'drop_prob': 0.5})
        layers.append({'type': 'pool', 'sx': 3, 'stride': 2}) #, 'drop_prob': 0.5})
        layers.append({'type': 'conv', 'sx': 5, 'filters': 20, 'stride': 1, 'pad': 2, 'activation': 'relu'}) #, 'drop_prob': 0.5})
        layers.append({'type': 'pool', 'sx': 2, 'stride': 2}) #, 'drop_prob': 0.5})
        #layers.append({'type': 'lrn', 'alpha': 5 * (10 ** -5), 'beta': 0.75, 'k': 1, 'n': 3, 'drop_prob': 0.5})
    else:
        layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
        #layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
        #layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'regression', 'num_neurons': dim * dim * depth})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'sgd', 'batch_size': 4, 'l2_decay': 0.0001})

    print 'Trainer made...'
    print t 
Example #21
Source File: cifar10.py    From ConvNetPy with MIT License
def start(conv, crop, gray):
    global training_data, testing_data, network, t

    training_data = load_data(crop, gray)
    testing_data = load_data(crop, gray, False)

    print 'Data loaded...'

    layers = []

    dim = 24 if crop else 32
    depth = 1 if gray else 3
    layers.append({'type': 'input', 'out_sx': dim, 'out_sy': dim, 'out_depth': depth})

    if conv:
        layers.append({'type': 'conv', 'sx': 5, 'filters': 16, 'stride': 1, 'pad': 2, 'activation': 'relu'}) #, 'drop_prob': 0.5})
        layers.append({'type': 'pool', 'sx': 3, 'stride': 2}) #, 'drop_prob': 0.5})
        layers.append({'type': 'conv', 'sx': 5, 'filters': 20, 'stride': 1, 'pad': 2, 'activation': 'relu'}) #, 'drop_prob': 0.5})
        layers.append({'type': 'pool', 'sx': 2, 'stride': 2}) #, 'drop_prob': 0.5})
        #layers.append({'type': 'lrn', 'alpha': 5 * (10 ** -5), 'beta': 0.75, 'k': 1, 'n': 3, 'drop_prob': 0.5})
    else:
        layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
        #layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
        #layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': 10})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'sgd', 'batch_size': 4, 'l2_decay': 0.0001})

    print 'Trainer made...' 
Example #22
Source File: sentiment.py    From ConvNetPy with MIT License
def start():
    global training_data, testing_data, network, t

    all_data = load_data()
    shuffle(all_data)
    size = int(len(all_data) * 0.1)
    training_data, testing_data = all_data[size:], all_data[:size]
    print 'Data loaded, size: {}...'.format(len(all_data))

    layers = []
    layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 80})
    layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
    layers.append({'type': 'softmax', 'num_classes': 4})

    print 'Layers made...'

    network = Net(layers)

    print 'Net made...'
    print network

    t = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})
Example #23
Source File: mnist.py    From ConvNetPy with MIT License
def start(conv):
    global training_data, testing_data, n, t

    training_data = load_data()
    testing_data = load_data(False)

    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 24, 'out_sy': 24, 'out_depth': 1})
    if conv:
        layers.append({'type': 'conv', 'sx': 5, 'filters': 8, 'stride': 1, 'pad': 2, 'activation': 'relu', 'drop_prob': 0.5})
        layers.append({'type': 'pool', 'sx': 2, 'stride': 2, 'drop_prob': 0.5})
    else:
        layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'relu'})
        #layers.append({'type': 'sim', 'num_neurons': 100, 'activation': 'mex'})
    layers.append({'type': 'softmax', 'num_classes': 10})

    print 'Layers made...'

    n = Net(layers)

    print 'Net made...'
    print n

    t = Trainer(n, {'method': 'adadelta', 'batch_size': 20, 'l2_decay': 0.001})
    print 'Trainer made...' 
Example #24
Source File: play.py    From torch-light with MIT License
def __init__(self):
        net = Net()
        if USECUDA:
            net = net.cuda()
        net.load_model("model.pt", cuda=USECUDA)
        self.net = net
        self.net.eval() 
Example #25
Source File: worker.py    From reinforce_py with MIT License
def __init__(
            self, worker_id, env, global_steps_counter, summary_writer, args):
        self.name = 'worker_' + str(worker_id)
        self.env = env
        self.args = args
        self.local_steps = 0
        self.global_steps_counter = global_steps_counter
        # each worker has its own optimizer and learning_rate
        self.learning_rate = tf.Variable(args.init_learning_rate,
                                         dtype=tf.float32,
                                         trainable=False,
                                         name=self.name + '_lr')
        self.delta_lr = \
            args.init_learning_rate / (args.max_steps / args.threads)
        self.trainer = tf.train.RMSPropOptimizer(self.learning_rate,
                                                 decay=args.decay,
                                                 epsilon=args.epsilon)
        self.summary_writer = summary_writer

        self.local_net = Net(S_DIM,
                             A_DIM,
                             scope=self.name,
                             args=self.args,
                             trainer=self.trainer)

        self.update_local_op = self._update_local_vars()
        self.anneal_learning_rate = self._anneal_learning_rate() 
Example #26
Source File: brain.py    From cwcf with MIT License
def __init__(self, pool):
        self.pool = pool

        self.model  = Net()
        self.model_ = Net()

        self.epsilon = config.PI_EPSILON_START
        self.lr = config.OPT_LR

        print("Network architecture:\n" + str(self.model)) 
Example #27
Source File: test_pg.py    From tianshou with MIT License
def test_pg(args=get_args()):
    env = gym.make(args.task)
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    # train_envs = gym.make(args.task)
    # you can also use tianshou.env.SubprocVectorEnv
    train_envs = VectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.training_num)])
    # test_envs = gym.make(args.task)
    test_envs = VectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.test_num)])
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)
    # model
    net = Net(
        args.layer_num, args.state_shape, args.action_shape,
        device=args.device, softmax=True)
    net = net.to(args.device)
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)
    dist = torch.distributions.Categorical
    policy = PGPolicy(net, optim, dist, args.gamma,
                      reward_normalization=args.rew_norm)
    # collector
    train_collector = Collector(
        policy, train_envs, ReplayBuffer(args.buffer_size))
    test_collector = Collector(policy, test_envs)
    # log
    log_path = os.path.join(args.logdir, args.task, 'pg')
    writer = SummaryWriter(log_path)

    def save_fn(policy):
        torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))

    def stop_fn(x):
        return x >= env.spec.reward_threshold

    # trainer
    result = onpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.collect_per_step, args.repeat_per_collect,
        args.test_num, args.batch_size, stop_fn=stop_fn, save_fn=save_fn,
        writer=writer)
    assert stop_fn(result['best_reward'])
    train_collector.close()
    test_collector.close()
    if __name__ == '__main__':
        pprint.pprint(result)
        # Let's watch its performance!
        env = gym.make(args.task)
        collector = Collector(policy, env)
        result = collector.collect(n_episode=1, render=args.render)
        print(f'Final reward: {result["rew"]}, length: {result["len"]}')
        collector.close() 
Example #28
Source File: train_A3C.py    From reinforce_py with MIT License
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
    global_steps_counter = itertools.count()  # thread-safe

    global_net = Net(S_DIM, A_DIM, 'global', args)
    num_workers = args.threads
    workers = []

    # create workers
    for i in range(1, num_workers + 1):
        worker_summary_writer = summary_writer if i == 0 else None
        worker = Worker(i, make_env(args), global_steps_counter,
                        worker_summary_writer, args)
        workers.append(worker)

    saver = tf.train.Saver(max_to_keep=5)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...\n')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...\n')
            sess.run(tf.global_variables_initializer())
        print_params_nums()
        # Start work process for each worker in a separated thread
        worker_threads = []
        for worker in workers:
            t = threading.Thread(target=lambda: worker.run(sess, coord, saver))
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)

        if args.eval_every > 0:
            evaluator = Evaluate(
                global_net, summary_writer, global_steps_counter, args)
            evaluate_thread = threading.Thread(
                target=lambda: evaluator.run(sess, coord))
            evaluate_thread.start()

        coord.join(worker_threads) 
Example #29
Source File: dark_knowledge.py    From ConvNetPy with MIT License
def run_big_net():
    global training_data, testing_data, n, t, training_data2

    training_data = load_data()
    testing_data = load_data(False)
    training_data2 = []

    print 'Data loaded...'

    layers = []
    layers.append({'type': 'input', 'out_sx': 24, 'out_sy': 24, 'out_depth': 1})
    layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'relu', 'drop_prob': 0.5})
    #layers.append({'type': 'fc', 'num_neurons': 800, 'activation': 'relu', 'drop_prob': 0.5})
    layers.append({'type': 'softmax', 'num_classes': 10})
    print 'Layers made...'

    n = Net(layers)
    print 'Net made...'
    print n

    t = Trainer(n, {'method': 'sgd', 'momentum': 0.0})
    print 'Trainer made...'

    print 'In training...'
    print 'k', 'time\t\t  ', 'loss\t  ', 'training accuracy'
    print '----------------------------------------------------'
    try:
        for x, y in training_data: 
            stats = t.train(x, y)
            print stats['k'], stats['time'], stats['loss'], stats['accuracy']
            training_data2.append((x, n.getPrediction()))
    except:  # hit control-c or other
        pass

    print 'In testing: 5000 trials'
    right = 0
    count = 5000
    for x, y in sample(testing_data, count):
        n.forward(x)
        right += n.getPrediction() == y
    accuracy = float(right) / count * 100
    print accuracy