Python keras.backend.tensorflow_backend.set_session() Examples

The following code examples show how to use keras.backend.tensorflow_backend.set_session(). They are taken from open-source Python projects.
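
All of these examples follow the same basic pattern: build a tf.ConfigProto (most often with gpu_options.allow_growth enabled so TensorFlow only claims GPU memory as it is needed), open a tf.Session with that configuration, and register the session with Keras through set_session(). The snippet below is a minimal, self-contained sketch of that pattern; it assumes TensorFlow 1.x and the standalone keras package, which is what the projects listed here use.

import tensorflow as tf
import keras.backend.tensorflow_backend as KTF

# Grow GPU memory on demand instead of reserving it all up front
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# Create a session with that configuration and make it the default Keras session
session = tf.Session(config=config)
KTF.set_session(session)

# Any Keras model built from this point on runs inside `session`

Some examples configure the session differently, for instance by capping memory with tf.GPUOptions(per_process_gpu_memory_fraction=...) as in Example 12, or by hiding the GPU entirely as in Example 15, but the call to set_session() plays the same role in each case.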

Example 1
Project: EasyPR-python   Author: SunskyF   File: model.py    Apache License 2.0
def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        if mode == 'training':
            import keras.backend.tensorflow_backend as KTF
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            session = tf.Session(config=config)
            KTF.set_session(session)
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config) 
Example 2
Project: EasyPR-python   Author: SunskyF   File: plate_detect.py    Apache License 2.0
def detect(self, src, model_dir):
        first = self.eval_sess is None
        if first:
            # if first load
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.eval_sess = tf.Session(graph=self.graph, config=config)
        with self.graph.as_default():
            if first:
                plate_config = PlateInferenceConfig()
                # plate_config.display()
                self.model = modellib.MaskRCNN(mode="inference", config=plate_config, model_dir=model_dir)
            KTF.set_session(self.eval_sess)
            model_path = self.model.find_last()[1]
            self.model.load_weights(model_path, by_name=True)
            result = self.model.detect([src])
        return self._post_process(result, src) 
Example 3
Project: AutoSleepScorerDev   Author: skjerns   File: models.py    GNU General Public License v3.0
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35 ):
    """
    for working with extracted features
    """
#    gpu = switch_gpu()
#    with K.tf.device('/gpu:{}'.format(gpu)):
#        K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
#    model.gpu = gpu
    for l in range(layers):
        model.add(Dense (neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model

#%% everything recurrent for ANN 
Example 4
Project: denoiser   Author: cdiazbas   File: profilePrediction.py    MIT License
def __init__(self, inputFile, depth, model, activation, output, number):

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.input = inputFile
        self.depth = depth
        self.network_type = model
        self.activation = activation
        self.number = number
        self.output = output
        self.nfilter = 32

        self.model = nn_model.unet(start_ch=self.nfilter)
        print("Loading weights... {0}_weights.hdf5".format(self.network_type))
        self.model.load_weights("{0}_weights.hdf5".format(self.network_type)) 
Example 5
Project: evo-pawness   Author: haryoa   File: main.py    GNU General Public License v3.0
def main_alpha_zero_train():
    """
    Main option to train the alpha zero model from scratch
    :return:
    """
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    from reinforcement_learning_train.alpha_zero.train_module import fit_train
    from reinforcement_learning_train.util.action_encoder import ActionEncoder
    from reinforcement_learning_train.alpha_zero.deep_net_architecture import PawnNet, PawnNetZero
    from reinforcement_learning_train.util.alphazero_util import action_spaces_new
    from collections import deque

    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    # config.log_device_placement = True  # to log device placement (on which device the operation ran)
    # # (nothing gets printed in Jupyter, only if you run it standalone)
    # sess = tf.Session(config=config)
    # set_session(sess)  # set this TensorFlow session as the default session for Keras
    all_action_spaces = action_spaces_new()

    deepnet_model = PawnNetZero(len(all_action_spaces))
    global_list_training = deque(maxlen=9000)
    ae = ActionEncoder()
    ae.fit(list_all_action=all_action_spaces)
    print(deepnet_model.model.summary())
    fit_train(global_list_training,ae, deepnet_model)
    deepnet_model.model.save("best_model.hdf5") 
Example 6
Project: evo-pawness   Author: haryoa   File: main.py    GNU General Public License v3.0
def main_alpha_zero_train_continue():
    """
    Main option to continue training the alpha zero model
    :return:
    """
    from collections import deque

    from keras.models import load_model
    from reinforcement_learning_train.alpha_zero.train_module import fit_train
    from reinforcement_learning_train.util.action_encoder import ActionEncoder
    from reinforcement_learning_train.alpha_zero.deep_net_architecture import PawnNet, PawnNetZero
    from reinforcement_learning_train.util.alphazero_util import action_spaces_new
    import pickle

    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    # config.log_device_placement = True  # to log device placement (on which device the operation ran)
    # # (nothing gets printed in Jupyter, only if you run it standalone)
    # sess = tf.Session(config=config)
    # set_session(sess)  # set this TensorFlow session as the default session for Keras
    all_action_spaces = action_spaces_new()

    MODEL_PATH = "checkpoint.hdf5"
    BEST_MODEL = "best_model.hdf5"
    GLOBAL_LIST_TRAINING_PATH = "global_list_training.p"
    # Import Model
    deepnet_model = PawnNetZero(len(all_action_spaces))
    deepnet_model.model = load_model(MODEL_PATH)
    best_model = load_model(BEST_MODEL)
    global_list_training = pickle.load(open(GLOBAL_LIST_TRAINING_PATH, "rb"))
    print("GLOBAL LIST SHAPE : {}".format(len(global_list_training)))
    ae = ActionEncoder()
    ae.fit(list_all_action=all_action_spaces)
    fit_train(global_list_training, ae, deepnet_model, best_model=best_model) 
Example 7
Project: sicon   Author: aasensio   File: evaluate_concat.py    MIT License
def __init__(self):
        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)
        self.nq = 49
        self.nlambda = 448 
Example 8
Project: sicon   Author: aasensio   File: train_concat.py    MIT License
def __init__(self, root, noise, lr, lr_multiplier, batch_size,l2_regularization,datasize):

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)


        self.root = root
        self.noise = noise
        self.lr = lr
        self.lr_multiplier = lr_multiplier
        self.batch_size = batch_size
        self.l2_regularization = l2_regularization
        self.n_training_orig = datasize
        self.n_validation_orig = int(2000)

        self.input_file_images_training = dire+'hinode1.h5'
        f = h5py.File(self.input_file_images_training, 'r')
        self.ns, self.nl, self.nx, self.ny = f['stokes'].shape        
        self.nq, self.ntau, self.nx, self.ny = f['cube'].shape
        f.close()

        #Size of batch:
        self.dx = 32
        self.nx, self.ny = self.dx, self.dx
        self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)
        self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)
        self.n_training = self.batchs_per_epoch_training * self.batch_size
        self.n_validation = self.batchs_per_epoch_validation * self.batch_size 
Example 9
Project: denoiser   Author: cdiazbas   File: bayesPrediction.py    MIT License
def __init__(self, network_type, output, number):

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.network_type = network_type
        self.number = number
        self.output = output
        self.nfilter = 32 
Example 10
Project: denoiser   Author: cdiazbas   File: prediction_sst.py    MIT License
def __init__(self, network_type, output, number):

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.network_type = network_type
        self.number = number
        self.output = output
        self.nfilter = 32 
Example 11
Project: denoiser   Author: cdiazbas   File: profilePrediction.py    MIT License
def crisp_load(fname, verb = True):
    import lptools as lp
    f1 = fname.format('')
    f2 = fname.format('_sp')
    nx, ny, ndum, nstokes, dtype, dum1 = lp.lphead(f1, verb)
    nw, nt, ndum, nstokes, dtype, dum1 = lp.lphead(f2, verb)
    io = np.memmap(f1, shape=(nt,nstokes,nw,ny,nx), offset=512,
                   dtype= dtype, mode='r')
    return io


# ncores = 10
# from keras import backend as K
# K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=ncores, inter_op_parallelism_threads=ncores))) 
Example 12
Project: Carla-RL   Author: Sentdex   File: trainer.py    MIT License
def check_weights_size(model_path, weights_size):

    # Memory fraction
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=settings.TRAINER_MEMORY_FRACTION)
    backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

    # create a model and save serialized weights' size
    trainer = ARTDQNTrainer(model_path)
    weights_size.value = len(trainer.serialize_weights())


# Runs trainer process 
Example 13
Project: deepvel   Author: aasensio   File: deepvel.py    MIT License
def __init__(self, observations, output, border=0):
        """

        Parameters
        ----------
        observations : array
            Array of size (n_times, nx, ny) with the n_times consecutive images of size nx x ny
        output : string
            Filename where the output is saved
        border : int (optional)
            Portion of the borders to be removed during computations. This is useful if images are
            apodized
        """

        # Only allocate needed memory with Tensorflow
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.border = border
        n_timesteps, nx, ny = observations.shape

        self.n_frames = n_timesteps - 1

        self.nx = nx - 2*self.border
        self.ny = ny - 2*self.border
        
        self.n_times = 2
        self.n_filters = 64
        self.batch_size = 1
        self.n_conv_layers = 20        
        self.observations = observations
        self.output = output

        print("Images without border are of size: {0}x{1}".format(self.nx, self.ny))
        print("Number of predictions to be made: {0}".format(self.n_frames)) 
Example 14
Project: deepvel   Author: aasensio   File: deepvel_k2.py    MIT License
def __init__(self, observations, output, border=0):
        """
        Class used to predict horizontal velocities from two consecutive continuum images

        Parameters
        ----------
        observations : array
            Array of size (n_times, nx, ny) with the n_times consecutive images of size nx x ny
        output : string
            Filename where the output is saved
        border : int (optional)
            Portion of the borders to be removed during computations. This is useful if images are
            apodized
        """

        # Only allocate needed memory with Tensorflow
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.border = border
        n_timesteps, nx, ny = observations.shape

        self.n_frames = n_timesteps - 1

        self.nx = nx - 2*self.border
        self.ny = ny - 2*self.border
        
        self.n_times = 2
        self.n_filters = 64
        self.batch_size = 1
        self.n_conv_layers = 20        
        self.observations = observations
        self.output = output

        print("Images without border are of size: {0}x{1}".format(self.nx, self.ny))
        print("Number of predictions to be made: {0}".format(self.n_frames)) 
Example 15
Project: IBATS_Common   Author: IBATS   File: utils.py    GNU General Public License v3.0
def use_cup_only():
    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    # os.environ["CUDA_VISIBLE_DEVICES"] = ""
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    import keras.backend.tensorflow_backend as ktf

    import tensorflow as tf
    ktf.set_session(tf.Session(config=tf.ConfigProto(device_count={'gpu': 0}))) 
Example 16
Project: HPLS-HFCN-openset   Author: rafaelvareto   File: HFCN_openset_load.py    GNU General Public License v3.0
def main():
    fscores = []
    prs = []
    rocs = []
    with Parallel(n_jobs=1, verbose=15, backend='multiprocessing') as parallel_pool:
        for index in range(ITERATIONS):
            keras_backend.clear_session()
            keras_session = tensorflow.Session()
            keras_backend.set_session(keras_session)

            print('ITERATION #%s' % str(index+1))
            pr, roc, fscore = fcnhface(args, parallel_pool)
            fscores.append(fscore)
            prs.append(pr)
            rocs.append(roc)

            with open('./files/plot_' + OUTPUT_NAME + '.file', 'w') as outfile:
                pickle.dump([prs, rocs], outfile)

            plot_precision_recall(prs, OUTPUT_NAME)
            plot_roc_curve(rocs, OUTPUT_NAME)
    
    means = mean_results(fscores)
    with open('./values/' + OUTPUT_NAME + '.txt', 'a') as outvalue:
        for item in fscores:
            outvalue.write(str(item) + '\n')
        for item in means:
            outvalue.write(str(item) + '\n') 
    print(fscores) 
Example 17
Project: enhance   Author: cdiazbas   File: train.py    MIT License
def __init__(self, root, noise, option, depth, network_type, activation, lr, lr_multiplier, batch_size,l2_regularization):

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)


        self.root = root
        self.option = option
        self.noise = noise
        self.depth = depth
        self.network_type = network_type        
        self.activation = activation        
        self.lr = lr
        self.lr_multiplier = lr_multiplier
        self.batch_size = batch_size
        self.l2_regularization = l2_regularization

        tmp = np.loadtxt('/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt')
        self.median_HMI, self.median_SST = tmp[0], tmp[1]
        # self.median_HMI, self.median_SST = 1.0, 1.0        

        self.input_file_images_training = "/net/viga/scratch1/cdiazbas/DATABASE/database_training_x2_PSF2.h5"
        # self.input_file_images_training = "/net/viga/scratch1/cdiazbas/DATABASE/database_training_x2_BLOS.h5"

        f = h5py.File(self.input_file_images_training, 'r')
        self.n_training_orig, self.nx, self.ny, _ = f['imHMI'].shape        
        f.close()

        self.input_file_images_validation = "/net/viga/scratch1/cdiazbas/DATABASE/database_validation_x2_PSF2.h5"
        # self.input_file_images_validation = "/net/viga/scratch1/cdiazbas/DATABASE/database_validation_x2_BLOS.h5"

        f = h5py.File(self.input_file_images_validation, 'r')
        self.n_validation_orig, self.nx, self.ny, _ = f['imHMI'].shape        
        f.close()        
        
        self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)
        self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)

        self.n_training = self.batchs_per_epoch_training * self.batch_size
        self.n_validation = self.batchs_per_epoch_validation * self.batch_size

        print("Original training set size: {0}".format(self.n_training_orig))
        print("   - Final training set size: {0}".format(self.n_training))
        print("   - Batch size: {0}".format(self.batch_size))
        print("   - Batches per epoch: {0}".format(self.batchs_per_epoch_training))

        print("Original validation set size: {0}".format(self.n_validation_orig))
        print("   - Final validation set size: {0}".format(self.n_validation))
        print("   - Batch size: {0}".format(self.batch_size))
        print("   - Batches per epoch: {0}".format(self.batchs_per_epoch_validation)) 
Example 18
Project: deepvel   Author: aasensio   File: train_deepvel.py    MIT License
def __init__(self, root, noise, option):
        """
        Class used to train DeepVel

        Parameters
        ----------
        root : string
            Name of the output files. Some extensions will be added for different files (weights, configuration, etc.)
        noise : float
            Noise standard deviation to be added during training. This helps avoid overfitting and
            makes the training more robust
        option : string
            Indicates what needs to be done
        """

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.root = root
        self.option = option

        self.n_filters = 64
        self.kernel_size = 3        
        self.batch_size = 32
        self.n_conv_layers = 20
        
        self.input_file_images_training = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_images.h5"
        self.input_file_velocity_training = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity.h5"

        self.input_file_images_validation = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5"
        self.input_file_velocity_validation = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5"

        f = h5py.File(self.input_file_images_training, 'r')
        self.n_training_orig, self.nx, self.ny, self.n_times = f.get("intensity").shape        
        f.close()

        f = h5py.File(self.input_file_images_validation, 'r')
        self.n_validation_orig, _, _, _ = f.get("intensity").shape        
        f.close()
        
        self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)
        self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)

        self.n_training = self.batchs_per_epoch_training * self.batch_size
        self.n_validation = self.batchs_per_epoch_validation * self.batch_size

        print("Original training set size: {0}".format(self.n_training_orig))
        print("   - Final training set size: {0}".format(self.n_training))
        print("   - Batch size: {0}".format(self.batch_size))
        print("   - Batches per epoch: {0}".format(self.batchs_per_epoch_training))

        print("Original validation set size: {0}".format(self.n_validation_orig))
        print("   - Final validation set size: {0}".format(self.n_validation))
        print("   - Batch size: {0}".format(self.batch_size))
        print("   - Batches per epoch: {0}".format(self.batchs_per_epoch_validation)) 
Example 19
Project: deepvel   Author: aasensio   File: train_deepvel_k2.py    MIT License
def __init__(self, root, noise, option):
        """
        """

        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)

        self.root = root
        self.option = option

        self.n_filters = 64
        self.kernel_size = 3        
        self.batch_size = 32
        self.n_conv_layers = 20
        
        self.input_file_images_training = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_images.h5"
        self.input_file_velocity_training = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity.h5"

        self.input_file_images_validation = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5"
        self.input_file_velocity_validation = "/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5"

        f = h5py.File(self.input_file_images_training, 'r')
        self.n_training_orig, self.nx, self.ny, self.n_times = f.get("intensity").shape        
        f.close()

        f = h5py.File(self.input_file_images_validation, 'r')
        self.n_validation_orig, _, _, _ = f.get("intensity").shape        
        f.close()
        
        self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)
        self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)

        self.n_training = self.batchs_per_epoch_training * self.batch_size
        self.n_validation = self.batchs_per_epoch_validation * self.batch_size

        print("Original training set size: {0}".format(self.n_training_orig))
        print("   - Final training set size: {0}".format(self.n_training))
        print("   - Batch size: {0}".format(self.batch_size))
        print("   - Batches per epoch: {0}".format(self.batchs_per_epoch_training))

        print("Original validation set size: {0}".format(self.n_validation_orig))
        print("   - Final validation set size: {0}".format(self.n_validation))
        print("   - Batch size: {0}".format(self.batch_size))
        print("   - Batches per epoch: {0}".format(self.batchs_per_epoch_validation)) 
Example 20
Project: MalmoRL   Author: petrosgk   File: ddpglearner.py    MIT License
def __init__(self, name, env, grayscale, width, height):
        super(DDPGLearner, self).__init__(name=name, env=env)

        self.nb_actions = env.available_actions
        self.abs_max_reward = env.abs_max_reward
        self.mission_name = env.mission_name

        self.grayscale = grayscale
        self.width = width
        self.height = height

        self.recurrent = False  # Use LSTM
        self.batch_size = 32
        self.window_length = 4

        if tf:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            tensorflow_backend.set_session(session=sess)

        if not self.recurrent:
            self.actor, self.critic, self.action_input = Minecraft_DDPG(self.window_length, self.grayscale, self.width,
                                                                        self.height, self.nb_actions)
        else:
            self.actor, self.critic, self.action_input = Minecraft_DDPG_LSTM(self.window_length, self.grayscale,
                                                                             self.width, self.height, self.nb_actions)

        # Replay memory
        self.memory = SequentialMemory(limit=1000000, window_length=self.window_length)

        # Add random noise for exploration
        self.random_process = GaussianWhiteNoiseProcess(mu=0.0, sigma=0.5, size=self.nb_actions)

        '''
        # We can also generate exploration noise with different parameters for each action. This is because we may want
        # eg. the agent to be more likely to explore moving forward than backward. In that case, a list or tuple of
        # random processes, one for each action, must be passed to the agent.
        # For example:

        self.random_process = []
        self.random_process.append(GaussianWhiteNoiseProcess(mu=1.5, sigma=1.0))  # For moving
        self.random_process.append(GaussianWhiteNoiseProcess(mu=0.0, sigma=1.0))  # For turning
        '''

        self.processor = MalmoProcessor(self.grayscale, self.window_length, self.recurrent, self.abs_max_reward)
        self.agent = DDPGAgent(actor=self.actor, critic=self.critic, critic_action_input=self.action_input,
                               nb_actions=self.nb_actions, memory=self.memory, batch_size=self.batch_size,
                               processor=self.processor, random_process=self.random_process, gamma=0.99,
                               nb_steps_warmup_actor=10000, nb_steps_warmup_critic=10000, target_model_update=1e-3)
        self.agent.compile([Adam(lr=1e-4), Adam(lr=1e-3)], metrics=['mae']) 
Example 21
Project: MalmoRL   Author: petrosgk   File: qlearner.py    MIT License
def __init__(self, name, env, grayscale, width, height):
        super(QLearner, self).__init__(name=name, env=env)

        self.nb_actions = env.available_actions
        self.abs_max_reward = env.abs_max_reward
        self.mission_name = env.mission_name

        self.grayscale = grayscale
        self.width = width
        self.height = height

        self.recurrent = False  # Use LSTM
        self.batch_size = 32
        self.window_length = 4

        if tf:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            tensorflow_backend.set_session(session=sess)

        if not self.recurrent:
            self.model = Minecraft(self.window_length, self.grayscale, self.width, self.height, self.nb_actions)
        else:
            self.model = Minecraft_LSTM(self.window_length, self.grayscale, self.width, self.height, self.nb_actions)

        # Replay memory
        self.memory = SequentialMemory(limit=1000000, window_length=self.window_length)

        '''
        Select a policy. We use eps-greedy action selection, which means that a random action is selected
        with probability eps. We can specify a custom biased probability distribution p for selecting random action,
        so that the agent is more likely to choose some actions when exploring over others. For example,
        if the possible actions are [move forward, move backward, turn right, turn left] and p = [0.6, 0.0, 0.2, 0.2] 
        the agent will go 60% forward, 0% backward, 20% left and 20% right when exploring.
        If p == None, the default uniform distribution is used.
        '''
        self.policy = LinearAnnealedPolicy(BiasedEpsGreedyQPolicy(nb_actions=self.nb_actions, p=None),
                                           attr='eps', value_max=1., value_min=.05, value_test=.005, nb_steps=1000000)

        self.processor = MalmoProcessor(self.grayscale, self.window_length, self.recurrent, self.abs_max_reward)
        self.agent = DQNAgent(model=self.model, nb_actions=self.nb_actions, policy=self.policy, test_policy=self.policy,
                              memory=self.memory, batch_size=self.batch_size, processor=self.processor,
                              nb_steps_warmup=50000, gamma=.99, target_model_update=10000, enable_double_dqn=True,
                              enable_dueling_network=True)
        self.agent.compile(Adam(lr=.00025), metrics=['mae']) 
Example 22
Project: encephalic_vasculature_mapping   Author: enry12   File: train_vascular.py    MIT License
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('model_prefix', type=str, help='Model folder')
    parser.add_argument('-h5', '--h5_file', type= str, help="path to h5 file used for training", required= True)
    parser.add_argument('-s', '--h5_set', type= str, help="choose the set inside the h5 file",
                        choices=['train', 'test', 'all_predict', 'all_train'], required= True)
    parser.add_argument('-p', '--patch_size', type=tuple, help='Size of the patch. It can be either a single integer or a tuple.', required=True)
    parser.add_argument('-d', '--conv_dim', type=int, help='Convolution dimension', choices=[2], default=2)
    parser.add_argument('-e', '--epochs', type=int, help='Number of epochs', default=20)
    parser.add_argument('-n', '--nb_minibatches', type=int, help='Number of minibatch per epoch', default=25000)
    args = parser.parse_args()   

    model_prefix = args.model_prefix
    patch_size = get_int_from_string_tuple(args.patch_size)
    conv_dim = args.conv_dim
    epochs = args.epochs
    nb_minibatches = args.nb_minibatches

    if not patch_size:
        print('Type of patch_size not understood')
        return None

    if len(patch_size) == 1:
        patch_size = patch_size*args.conv_dim
     

    if len(patch_size) != conv_dim:
        print('Patch size must be either 1-dimension or %d-dimension' % conv_dim)
        return None


    if check_equal(patch_size):
        str_patch_size = str(patch_size[0])
    else:
        str_patch_size = '_'.join([str(x) for x in patch_size])

    model_folder = model_prefix + '/model_conv_%d_%s_nfc_%s' % (conv_dim, str_patch_size, time.strftime("%b-%d-%H-%M-%S", time.localtime()))
    while(os.path.isdir(model_folder)):
        model_folder = model_prefix + '/model_%d_%s_nfc_%s' % (conv_dim, str_patch_size, time.strftime("%b-%d-%H-%M-%S", time.localtime()))
       
    try:
        os.makedirs(model_folder)
    except:
        print('Folder %s can not be created. Check permission rights.' % model_prefix)
        return None

    KTF.set_session(get_session(gpu_fraction=0.8))
    data = VascularData(  args.h5_file,
                          p_shape= patch_size,
                          which_set= args.h5_set,
                          batchsize= 128 )

    model = create_autoencoder(model_folder, patch_size)
    train(model, model_folder, data, epochs=epochs, nb_minibatches=nb_minibatches)