Python keras.backend.set_session() Examples

The following are code examples for showing how to use keras.backend.set_session(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: mercari-price-suggestion   Author: aerdem4   File: nn_model.py    MIT License 6 votes vote down vote up
def __init__(self, train_df, word_count, batch_size, epochs):
        """Configure the TF session and derive embedding vocabulary sizes.

        train_df: training DataFrame with integer-encoded brand_name,
            item_condition_id and subcat_0/1/2 columns (assumed — TODO confirm).
        word_count: highest text-token index; +1 leaves room for index 0/padding.
        batch_size/epochs: training schedule, stored for later fit() calls.
        """
        tf.set_random_seed(4)  # fixed seed for reproducibility
        session_conf = tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=8)
        backend.set_session(tf.Session(graph=tf.get_default_graph(), config=session_conf))

        self.batch_size = batch_size
        self.epochs = epochs

        self.max_name_seq = 10
        self.max_item_desc_seq = 75
        self.max_text = word_count + 1
        # Series.max() already returns a scalar, so the original's extra
        # np.max(...) wrapper was redundant; +1 sizes each embedding to hold
        # the largest category index.
        self.max_brand = train_df.brand_name.max() + 1
        self.max_condition = train_df.item_condition_id.max() + 1
        self.max_subcat0 = train_df.subcat_0.max() + 1
        self.max_subcat1 = train_df.subcat_1.max() + 1
        self.max_subcat2 = train_df.subcat_2.max() + 1
Example 2
Project: OSCAR   Author: Xaxetrov   File: brain.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, num_state, num_actions, none_state):
        """Build the A3C brain: Keras model, TF training graph, finalized default graph.

        num_state / num_actions: dimensions of the state and action spaces.
        none_state: placeholder state stored in module globals (presumably used
            for terminal transitions — confirm against callers).
        """
        self.session = tf.Session()
        K.set_session(self.session)
        # Defer Keras variable initialization so it happens exactly once,
        # explicitly, via the global initializer below.
        K.manual_variable_initialization(True)
        global NUM_ACTIONS, NUM_STATE, NONE_STATE
        NUM_ACTIONS = num_actions
        NUM_STATE = num_state
        NONE_STATE = none_state

        self.model = self._build_model()
        self.graph = self._build_graph(self.model)

        # Initialize variables only after both model and graph are built.
        self.session.run(tf.global_variables_initializer())
        self.default_graph = tf.get_default_graph()

        self.default_graph.finalize()  # avoid modifications 
Example 3
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_a3c.py    MIT License 6 votes vote down vote up
def __init__(self, action_size):
        """Global A3C agent: actor/critic networks, update ops, TensorBoard logging.

        action_size: number of discrete actions available to the agent.
        """
        # environment settings
        self.state_size = (84, 84, 4)
        self.action_size = action_size

        self.discount_factor = 0.99
        self.no_op_steps = 30

        # optimizer parameters
        self.actor_lr = 2.5e-4
        self.critic_lr = 2.5e-4
        self.threads = 8

        # create model for actor and critic network
        self.actor, self.critic = self.build_model()

        # method for training actor and critic network
        self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]

        # Session is registered with Keras before variables are initialized.
        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)
        self.sess.run(tf.global_variables_initializer())

        self.summary_placeholders, self.update_ops, self.summary_op = self.setup_summary()
        self.summary_writer = tf.summary.FileWriter('summary/breakout_a3c', self.sess.graph) 
Example 4
Project: ycml   Author: skylander86   File: neural_networks.py    Apache License 2.0 6 votes vote down vote up
def set_session(self, *, tf_config=None, init_op=True, **sess_args):
        """Create a fresh graph and TF session, install it as the Keras session.

        Falls back to ``self.tf_config``, then to a default ConfigProto sized
        from ``self.n_jobs``, when no ``tf_config`` is given.  Runs the global
        variable initializer unless ``init_op`` is False.  Stores the session
        on ``self.session`` and returns it.
        """
        config = tf_config if tf_config is not None else self.tf_config
        if config is None:
            # Build a sensible default: thread pools sized by n_jobs, device
            # placement logging tied to the effective log level.
            n_jobs = getattr(self, 'n_jobs', 1)
            log_device_placement = getattr(self, 'log_device_placement', logger.getEffectiveLevel() <= logging.DEBUG)
            config = tf.ConfigProto(
                inter_op_parallelism_threads=n_jobs,
                intra_op_parallelism_threads=n_jobs,
                log_device_placement=log_device_placement,
                allow_soft_placement=True,
            )

        self.graph = tf.Graph()
        session = tf.Session(config=config, graph=self.graph, **sess_args)
        K.set_session(session)

        if init_op:
            session.run(tf.global_variables_initializer())

        self.session = session
        return session
Example 5
Project: ycml   Author: skylander86   File: neural_networks.py    Apache License 2.0 6 votes vote down vote up
def load_from_tarfile(self, tar_file):
        """Restore the Keras model from the 'nn_model.h5' member of an open tarfile.

        Creates a fresh session/graph (without running initializers), copies the
        serialized model to a temporary .h5 path on disk (load_model wants a
        real file path), loads it inside this object's graph/session, and
        returns self.
        """
        self.set_session(init_op=False)

        fname = None
        try:
            with NamedTemporaryFile(suffix='.h5', delete=False) as f:
                timer = Timer()
                # Stream the model file out of the tar archive onto disk.
                shutil.copyfileobj(tar_file.extractfile('nn_model.h5'), f)
                fname = f.name
            #end with

            with self.graph.as_default(), self.session.as_default():
                self.nn_model_ = load_model(fname, custom_objects=self.custom_objects)
                # Pre-build the predict function eagerly (commonly done before
                # using the model from multiple threads — confirm intent).
                self.nn_model_._make_predict_function()
                # print(self.graph, tf.get_default_graph())
            # self.graph = tf.get_default_graph()
            logger.debug('Loaded neural network model weights {}.'.format(timer))

        finally:
            # Always remove the temporary file, even if loading failed.
            if fname:
                os.remove(fname)
        #end try

        return self
    #end def 
Example 6
Project: reinforcement-learning   Author: rlcode   File: breakout_a3c.py    MIT License 6 votes vote down vote up
def __init__(self, action_size):
        """A3C global agent for Breakout: networks, update ops, summary writer.

        action_size: number of discrete actions.
        """
        # environment settings
        self.state_size = (84, 84, 4)
        self.action_size = action_size

        self.discount_factor = 0.99
        self.no_op_steps = 30

        # optimizer parameters
        self.actor_lr = 2.5e-4
        self.critic_lr = 2.5e-4
        self.threads = 8

        # create model for actor and critic network
        self.actor, self.critic = self.build_model()

        # method for training actor and critic network
        self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]

        # Register the session with Keras, then initialize all variables.
        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)
        self.sess.run(tf.global_variables_initializer())

        self.summary_placeholders, self.update_ops, self.summary_op = self.setup_summary()
        self.summary_writer = tf.summary.FileWriter('summary/breakout_a3c', self.sess.graph) 
Example 7
Project: RLContinuousActionSpace   Author: hchkaiban   File: Models.py    MIT License 6 votes vote down vote up
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        """DDPG actor network plus its target copy and the policy-gradient op.

        sess: TF session shared with the critic.
        state_size/action_size: dimensions of state and action spaces.
        TAU: soft target-update factor; LEARNING_RATE: Adam step size.
        """
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        # Now create the model: actor and a structurally identical target copy.
        self.model, self.weights, self.state = self.create_actor_network(state_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size)
        # Gradient of the actor output w.r.t. its weights, weighted by the
        # negated action-gradient fed in from the critic (gradient ascent).
        self.action_gradient = tf.placeholder(tf.float32, [None, action_size])
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        # Fix: tf.initialize_all_variables() was deprecated (and later removed);
        # tf.global_variables_initializer() is the drop-in replacement.
        self.sess.run(tf.global_variables_initializer())
Example 8
Project: models   Author: IntelAI   File: model.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        # Pin TF's thread pools to the configured sizes, then make this session
        # the Keras default before any model graph is built.
        session_config = tf.ConfigProto(
            intra_op_parallelism_threads=config.NUM_INTRA,
            inter_op_parallelism_threads=config.NUM_INTER)
        K.set_session(tf.Session(config=session_config))
        K.set_image_data_format('channels_last')

        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)
Example 9
Project: yolov3-3dcarbox   Author: zoujialong9012   File: yolo3d.py    MIT License 6 votes vote down vote up
def __init__(self):
        """Load YOLO-3D configuration (weights, anchors, classes, thresholds)
        and build the detection output tensors via generate()."""
#        self.model_path = 'model_data/yolo.h5' # model path or trained weights path
#        self.model_path = 'logs/000/trained_weights_final.h5' # model path or trained weights path
        self.model_path = 'logs/000/trained_weights_finalback.h5' # model path or trained weights path
    #    self.model_path3d = '/home/cidi/dl/3dobject/3D-Deepbox/model'
        self.anchors_path = 'model_data/yolo_anchors.txt'
        #self.anchors_path = 'model_data/kitti_yolo_anchors.txt'
        #self.classes_path = 'model_data/coco_classes.txt'
        self.classes_path = 'model_data/kitti_classes.txt'
    #    self.graph = tf.Graph()
        # Detection score and NMS IoU thresholds.
        self.score = 0.3
        self.iou = 0.45
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

    #    with self.graph.as_default():
    #        print('tf session tf graph')
    #        self.sess = tf.Session(graph = self.graph)#K.get_session()
        # Reuse the session Keras already owns rather than creating a new one.
        self.sess = K.get_session()
        #K.set_session(self.sess)
        #self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.model_image_size = (384, 960) # fixed size or (None, None), hw
        self.boxes, self.scores, self.classes = self.generate() 
Example 10
Project: rl_puyopuyo   Author: frostburn   File: mp_train.py    MIT License 6 votes vote down vote up
def get_initial_weights():
    """Let Keras initialize a simple agent in a throwaway CPU-only session,
    then flatten every trainable variable into one 1-D numpy array."""
    register()
    with tf.device("/cpu:0"):
        # device_count={"GPU": 0} keeps this warm-up run off the GPU entirely.
        with tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})) as session:
            K.set_session(session)
            agent = get_simple_agent(session, 1)
            session.run(tf.global_variables_initializer())
            # Initial guess by Keras
            variables = tf.trainable_variables()
            initial_values = session.run(variables)
            flat = []
            shapes = []
            placeholders = []
            assigns = []
            for values, variable in zip(initial_values, variables):
                placeholders.append(tf.placeholder(tf.float32, values.shape))
                assigns.append(variable.assign(placeholders[-1]))
                shapes.append(values.shape)
                flat.extend(values.flatten())
            print(shapes)
            w = np.array(flat)
    return w
Example 11
Project: Learning-to-navigate-without-a-map   Author: ToniRV   File: ddpg.py    MIT License 6 votes vote down vote up
def __init__(self, sess, state_size, action_size,
                 batch_size, tau, learning_rate):
        """Init critic network.

        Builds the critic and a structurally identical target copy, plus the
        op that differentiates Q-values w.r.t. the action input (fed back to
        the actor for the deterministic policy gradient).
        """
        self.sess = sess
        self.batch_size = batch_size
        self.tau = tau
        self.learning_rate = learning_rate
        self.action_size = action_size

        K.set_session(sess)

        self.model, self.action, self.state = \
            self.create_critic_network(state_size, action_size)
        self.target_model, self.target_action, self.target_state = \
            self.create_critic_network(state_size, action_size)
        # dQ/da — consumed by the actor's policy-gradient update.
        self.action_grads = tf.gradients(self.model.output, self.action)
        # Fix: tf.initialize_all_variables() was deprecated (and later removed);
        # tf.global_variables_initializer() is the drop-in replacement.
        self.sess.run(tf.global_variables_initializer())
Example 12
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_CartPole-A3C.py    MIT License 5 votes vote down vote up
def __init__(self):
        """A3C brain: model + training graph on one session, then a finalized
        default graph so worker threads cannot mutate it."""
        self.session = tf.Session()
        K.set_session(self.session)
        # Defer variable initialization; it is run explicitly once below.
        K.manual_variable_initialization(True)

        self.model = self._build_model()
        self.graph = self._build_graph(self.model)

        self.session.run(tf.global_variables_initializer())
        self.default_graph = tf.get_default_graph()

        self.default_graph.finalize()  # avoid modifications 
Example 13
Project: ppi_lstm_rnn_keras   Author: ylhsieh   File: train_keras.py    MIT License 5 votes vote down vote up
def main():
    """10-fold cross-validation training loop for the PPI bi-LSTM classifier.

    Relies on module-level globals: opt, hidden_size, rnn_hidden_size,
    dropout_rate, max_sequence_length, pretrained_embeddings_file, epochs,
    batch_size, and the helper functions imported at file level.
    """

    def build_model():
        # Pretrained embeddings -> dropout -> bi-LSTM -> dropout -> 2-way softmax.
        model = Sequential()
        model.add(Embedding(len(train_vocab), hidden_size, weights=[embedding_array],\
                            input_length=max_sequence_length))
        model.add(Dropout(dropout_rate))
        model.add(Bidirectional(LSTM(rnn_hidden_size)))
        model.add(Dropout(dropout_rate))
        model.add(Dense(2, activation='softmax'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        return model

    train_vocab = load_vocab_from(opt.data + '.vocab')
    embedding_array = load_pretrained_embeddings(train_vocab, pretrained_embeddings_file)
    for fold_id in range(10):
        # New session per fold, capped at half the GPU memory.
        tfsession = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)))
        K.set_session(tfsession)
        train_file = 'corpus/{}_f{}_train.txt'.format(opt.data, fold_id)
        test_file = 'corpus/{}_f{}_test.txt'.format(opt.data, fold_id)
        log_file = '{}_f{}.log'.format(opt.data, fold_id)
        x_train, x_test, y_train, y_test, _ = read_corpus(train_file, test_file, train_vocab)
        fscore_cb = FscoreLogCallback(log_file)
        model = build_model()
        print("Fold {}".format(fold_id))
        model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, \
                  callbacks=[fscore_cb], verbose=2)
        predicted = np.argmax(model.predict(x_test), axis=1)
        y_test_to_label = np.argmax(y_test, axis=1)
        prec, reca, fscore, sup = precision_recall_fscore_support(y_test_to_label, predicted, average='binary')
        print("Final Precision:{:2.2f}% Recall:{:2.2f}% Fscore:{:2.2f}%".format(prec*100, reca*100, fscore*100)) 
Example 14
Project: code_FLNN   Author: chasebk   File: traditional_ffnn.py    Apache License 2.0 5 votes vote down vote up
def _training__(self):
        """Build, compile and fit the single-hidden-layer FFNN; store the loss curve."""
        self.model = Sequential()
        self.model.add(Dense(units=self.hidden_sizes[0], input_dim=self.X_train.shape[1], activation=self.activations[0]))
        self.model.add(Dense(1, activation=self.activations[1]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TF's thread pools before training begins.
        session_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=session_config))
        history = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = history.history["loss"]
Example 15
Project: code_FLNN   Author: chasebk   File: traditional_ffnn.py    Apache License 2.0 5 votes vote down vote up
def _training__(self):
        """Build, compile and fit the two-hidden-layer FFNN; store the loss curve."""
        self.model = Sequential()
        self.model.add(Dense(self.hidden_sizes[0], input_dim=self.X_train.shape[1], activation=self.activations[0]))
        self.model.add(Dense(self.hidden_sizes[1], activation=self.activations[1]))
        self.model.add(Dense(1, activation=self.activations[2]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TF's thread pools before training begins.
        session_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=session_config))
        history = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = history.history["loss"]
Example 16
Project: code_FLNN   Author: chasebk   File: traditional_rnn.py    Apache License 2.0 5 votes vote down vote up
def _training__(self):
        """Build, compile and fit a single-layer LSTM regressor; store the loss curve."""
        #  The RNN architecture
        self.model = Sequential()
        self.model.add(LSTM(units=self.hidden_sizes[0], activation=self.activations[0], input_shape=(self.X_train.shape[1], 1)))
        self.model.add(Dropout(self.dropouts[0]))
        self.model.add(Dense(units=1, activation=self.activations[1]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TF's thread pools before training begins.
        session_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=session_config))
        fit_result = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = fit_result.history["loss"]
Example 17
Project: code_FLNN   Author: chasebk   File: traditional_rnn.py    Apache License 2.0 5 votes vote down vote up
def _training__(self):
        """Build, compile and fit a stacked two-layer LSTM regressor; store the loss curve."""
        #  The RNN architecture
        self.model = Sequential()
        self.model.add(LSTM(units=self.hidden_sizes[0], return_sequences=True, input_shape=(self.X_train.shape[1], 1), activation=self.activations[0]))
        self.model.add(Dropout(self.dropouts[0]))
        self.model.add(LSTM(units=self.hidden_sizes[1], activation=self.activations[1]))
        self.model.add(Dropout(self.dropouts[1]))
        self.model.add(Dense(units=1, activation=self.activations[2]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TF's thread pools before training begins.
        session_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=session_config))
        fit_result = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = fit_result.history["loss"]
Example 18
Project: code_FLNN   Author: chasebk   File: traditional_rnn.py    Apache License 2.0 5 votes vote down vote up
def _training__(self):
        """Build, compile and fit a variable-length LSTM regressor; store the loss curve."""
        #  The LSTM architecture
        self.model = Sequential()
        self.model.add(LSTM(units=self.hidden_sizes[0], input_shape=(None, 1), activation=self.activations[0]))
        self.model.add(Dense(units=1, activation=self.activations[1]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer)
        # Cap TF's thread pools before training begins.
        session_config = backend.tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=2)
        backend.set_session(backend.tf.Session(config=session_config))
        history = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train)
        self.loss_train = history.history["loss"]
Example 19
Project: spark-deep-learning   Author: databricks   File: keras_utils.py    Apache License 2.0 5 votes vote down vote up
def __enter__(self):
        """Swap in a fresh session (on the requested or a new graph) as the
        Keras default, remembering the old session so __exit__ can restore it.

        Returns (session, graph) for the ``with ... as (sess, g)`` form.
        """
        # pylint: disable=attribute-defined-outside-init
        self.old_session = K.get_session()
        self.g = self.requested_graph or tf.Graph()     # pylint: disable=invalid-name
        self.current_session = tf.Session(graph=self.g)
        # pylint: enable=attribute-defined-outside-init
        K.set_session(self.current_session)
        return self.current_session, self.g 
Example 20
Project: spark-deep-learning   Author: databricks   File: keras_utils.py    Apache License 2.0 5 votes vote down vote up
def __exit__(self, exc_type, exc_val, exc_tb):
        """Re-install the Keras session that was active before __enter__."""
        # Restore the previous session
        K.set_session(self.old_session) 
Example 21
Project: spark-deep-learning   Author: databricks   File: builder.py    Apache License 2.0 5 votes vote down vote up
def __enter__(self):
        """Enter the wrapped TF session and, when Keras is in use, also make
        it the Keras default session.  Returns self for chaining."""
        self.sess.__enter__()
        if self.using_keras:
            K.set_session(self.sess)
        return self 
Example 22
Project: spark-deep-learning   Author: databricks   File: builder.py    Apache License 2.0 5 votes vote down vote up
def __exit__(self, *args):
        """Restore the prior Keras session (if any), then leave the TF session."""
        if self.using_keras:
            K.set_session(self.keras_prev_sess)
        self.sess.__exit__(*args) 
Example 23
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_dqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """DQN agent for Breakout: epsilon schedule, replay memory, model and
        target model, and TensorBoard summaries.

        action_size: number of discrete actions.
        """
        self.render = False
        self.load_model = False
        # Define the state and action sizes (84x84 frames, 4 stacked)
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        # DQN hyperparameters
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        # Replay memory, maximum size 400000
        self.memory = deque(maxlen=400000)
        self.no_op_steps = 30
        # Create the model and target model, then sync the target model
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE(review): rebinds self.optimizer from the method to its result.
        self.optimizer = self.optimizer()

        # TensorBoard setup
        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_dqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            self.model.load_weights("./save_model/breakout_dqn.h5")

    # Define the optimizer by hand in order to use the Huber loss 
Example 24
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_a3c.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """A3C global agent for Breakout: actor/critic networks, their update
        ops, and TensorBoard summary writers."""
        # Get the state and action sizes
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        # A3C hyperparameters
        self.discount_factor = 0.99
        self.no_op_steps = 30
        self.actor_lr = 2.5e-4
        self.critic_lr = 2.5e-4
        # Number of worker threads
        self.threads = 8

        # Create the policy (actor) and value (critic) networks
        self.actor, self.critic = self.build_model()
        # Create the update ops for the policy and value networks
        self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]

        # TensorBoard setup
        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)
        self.sess.run(tf.global_variables_initializer())

        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = \
            tf.summary.FileWriter('summary/breakout_a3c', self.sess.graph)

    # Spawns worker threads that carry out the training 
Example 25
Project: reinforcement-learning-kr   Author: rlcode   File: play_dqn_model.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Playback-only agent: builds the Q-network and a session for evaluation.

        action_size: number of discrete actions.
        """
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        self.no_op_steps = 20

        self.model = self.build_model()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.sess.run(tf.global_variables_initializer()) 
Example 26
Project: timeception   Author: noureldien   File: config_utils.py    GNU General Public License v3.0 5 votes vote down vote up
def __config_gpu_for_keras():
    """Restrict Keras/TF to the GPU chosen by __parse_gpu_id() and record it."""
    import tensorflow as tf
    import keras.backend as K

    gpu_core_id = __parse_gpu_id()

    # Start from a clean Keras state, then bind a session that only sees the
    # chosen GPU and grows its memory allocation on demand.
    K.clear_session()
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.visible_device_list = str(gpu_core_id)
    gpu_config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=gpu_config))

    # set which device to be used
    const.GPU_CORE_ID = gpu_core_id
Example 27
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_ddqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Double-DQN agent for Breakout: epsilon schedule, replay memory,
        model/target model, and TensorBoard summaries.

        action_size: number of discrete actions.
        """
        self.render = False
        self.load_model = False
        # environment settings
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        # parameters about epsilon
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        # parameters about training
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        self.memory = deque(maxlen=400000)
        self.no_op_steps = 30
        # build
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE(review): rebinds self.optimizer from the method to its result.
        self.optimizer = self.optimizer()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_ddqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            self.model.load_weights("./save_model/breakout_ddqn.h5")

    # if the error is in [-1, 1], then the cost is quadratic to the error
    # But outside the interval, the cost is linear to the error 
Example 28
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_dqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """DQN agent for Breakout: epsilon schedule, replay memory, model and
        target model, and TensorBoard summaries.

        action_size: number of discrete actions.
        """
        self.render = False
        self.load_model = False
        # environment settings
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        # parameters about epsilon
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        # parameters about training
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        self.memory = deque(maxlen=400000)
        self.no_op_steps = 30
        # build model
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE(review): rebinds self.optimizer from the method to its result.
        self.optimizer = self.optimizer()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_dqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            self.model.load_weights("./save_model/breakout_dqn.h5")

    # if the error is in [-1, 1], then the cost is quadratic to the error
    # But outside the interval, the cost is linear to the error 
Example 29
Project: reinforcement-learning   Author: buyizhiyou   File: play_dqn_model.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Evaluation-only agent: Q-network plus a session for playing back a
        trained Breakout model.

        action_size: number of discrete actions.
        """
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        self.no_op_steps = 20

        self.model = self.build_model()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.sess.run(tf.global_variables_initializer()) 
Example 30
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_dueling_ddqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Dueling double-DQN agent for Breakout: epsilon schedule, replay
        memory, model/target model, and TensorBoard summaries.

        action_size: number of discrete actions.
        """
        self.render = False
        self.load_model = False
        # environment settings
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        # parameters about epsilon
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        # parameters about training
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        self.memory = deque(maxlen=400000)
        self.no_op_steps = 30
        # build
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE(review): rebinds self.optimizer from the method to its result.
        self.optimizer = self.optimizer()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_dueling_ddqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            # NOTE(review): "ddqb" looks like a typo for "ddqn" — confirm the
            # actual filename the training script saves before changing it.
            self.model.load_weights("./save_model/breakout_dueling_ddqb.h5")

    # if the error is in [-1, 1], then the cost is quadratic to the error
    # But outside the interval, the cost is linear to the error 
Example 31
Project: rldurak   Author: janEbert   File: critic.py    MIT License 5 votes vote down vote up
def __init__(
            self, sess, state_shape, action_shape, load=True, optimizer='adam',
            alpha=0.001, epsilon=1e-8, tau=0.001, neurons_per_layer=[100, 50]):
        """Initialize a critic with the given session, learning rate,
        update factor and neurons in the hidden layers.

        If load is true, load the model instead of creating a new one.
        """
        self.sess = sess
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.optimizer_choice = optimizer.lower()
        self.alpha = alpha
        self.tau = tau
        if len(neurons_per_layer) < 2:
            if not neurons_per_layer:
                self.neurons_per_layer = [100, 50]
            else:
                # Bug fix: the original called self.neurons_per_layer.append(50)
                # before the attribute existed (AttributeError), and would also
                # have mutated the shared mutable default argument.
                self.neurons_per_layer = list(neurons_per_layer) + [50]
            print('Neurons per layer for the critic have been adjusted')
        else:
            # Copy so later mutation cannot alias the caller's list or the
            # shared default argument.
            self.neurons_per_layer = list(neurons_per_layer)
        K.set_session(sess)
        self.model, self.state_input, self.action_input = self.create_model(
                epsilon)
        self.target_model = self.create_model(epsilon)[0]
        # dQ/da — fed back to the actor for the deterministic policy gradient.
        self.action_gradients = K.gradients(self.model.output,
                self.action_input)
        self.sess.run(tf.global_variables_initializer())
        if load:
            self.load_weights()
        # Pre-build predict functions (commonly done before threaded use).
        self.model._make_predict_function()
        self.target_model._make_predict_function()
Example 32
Project: rldurak   Author: janEbert   File: actor.py    MIT License 5 votes vote down vote up
def __init__(
            self, sess, state_shape, action_shape, load=True, optimizer='adam',
            alpha=0.001, epsilon=1e-8, tau=0.001, neurons_per_layer=[100, 50]):
        """Construct an actor with the given session, learning rate,
        update factor and neurons in the hidden layers.

        If load is true, load the model instead of creating a new one.
        """
        self.sess = sess
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.optimizer_choice = optimizer.lower()
        self.alpha = alpha
        self.tau = tau
        if not neurons_per_layer:
            self.neurons_per_layer = [100]
            print('Neurons per layer for the actor have been adjusted')
        else:
            # Fix: copy the list so the instance never aliases the shared
            # mutable default argument (sibling code mutates such lists).
            self.neurons_per_layer = list(neurons_per_layer)
        K.set_session(sess)
        self.model, self.inputs, weights = self.create_model()
        self.target_model = self.create_model()[0]
        # Action gradients arrive from the critic; negated so apply_gradients
        # performs gradient ascent on expected Q.
        self.action_gradients = tf.placeholder(tf.float32,
                [None, self.action_shape])
        parameter_gradients = tf.gradients(self.model.output, weights,
                -self.action_gradients)
        gradients = zip(parameter_gradients, weights)
        assert self.optimizer_choice in ['adam', 'rmsprop']
        if self.optimizer_choice == 'adam':
            self.optimizer = tf.train.AdamOptimizer(
                    self.alpha, epsilon=epsilon).apply_gradients(gradients)
        else:
            self.optimizer = tf.train.RMSPropOptimizer(
                    self.alpha, epsilon=epsilon).apply_gradients(gradients)
        self.sess.run(tf.global_variables_initializer())
        if load:
            self.load_weights()
        # Pre-build predict functions (commonly done before threaded use).
        self.model._make_predict_function()
        self.target_model._make_predict_function()
Example 33
Project: kutils   Author: subpic   File: tensor_ops.py    MIT License 5 votes vote down vote up
def GPUMemoryCap(fraction=1):
    """
    Limit the amount of GPU memory that can be used by an active kernel.

    :param fraction: in [0, 1], 1 = the entire available GPU memory.
    """
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = fraction
    # NOTE(review): K.tf (the backend's TF handle) is deprecated in newer
    # Keras releases; a plain tf.Session(config=config) is the usual spelling.
    K.set_session(K.tf.Session(config=config))


# Metrics and losses 
Example 34
Project: ycml   Author: skylander86   File: neural_networks.py    Apache License 2.0 5 votes vote down vote up
def __init__(
        self,
        tf_config=None, set_session=True,  # set_session should be False when you are initializing a second classifier...
        epochs=10, batch_size=128, passes_per_epoch=1,
        initial_weights=None, initial_epoch=0,
        validation_size=0.2, verbose=0,
        early_stopping=None, save_best=None, save_weights=None,
        log_device_placement=False,
        **kwargs
    ):
        """Store training hyper-parameters and optionally bind the TF session."""
        # Session / device configuration.
        self.tf_config = tf_config
        self.log_device_placement = log_device_placement

        # Training-loop settings.
        self.epochs = epochs
        self.batch_size = batch_size
        self.passes_per_epoch = passes_per_epoch
        self.initial_weights = initial_weights
        self.initial_epoch = initial_epoch
        self.validation_size = validation_size
        self.verbose = verbose

        # Callback-related settings.
        self.early_stopping = early_stopping
        self.save_weights = save_weights
        self.save_best = save_best

        if set_session:
            self.set_session(tf_config)
    #end def
Example 35
Project: labelImg   Author: keyuncheng   File: MaskRCNNInference.py    MIT License 5 votes vote down vote up
def inference(imagePath, labelFilePath, session, model):
    """Run Mask-RCNN detection on one image and return labelled detections.

    Returns (rois, contours, class_names) as produced by handleResults.
    """
    global class_names

    # Bind the supplied session so Keras ops run on the right graph.
    K.set_session(session)

    with session.graph.as_default() as g:
        print("start auto labeling using Mask-RCNN model")

        start_time = time.time()
        image = skimage.io.imread(imagePath)

        # Single-image detection; take the only result.
        detections = model.detect([image], verbose=1)
        r = detections[0]

    # Drop the session once detection is done.
    K.clear_session()

    r_rois, r_contours, r_class_names = handleResults(
        r['rois'], r['masks'], r['class_ids'], class_names)

    print("--- %s seconds ---" % (time.time() - start_time))
    return r_rois, r_contours, r_class_names
Example 36
Project: reinforcement-learning   Author: rlcode   File: breakout_ddqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Double-DQN agent for Breakout: builds the online and target networks,
        the custom training op, and TensorBoard summaries.

        :param action_size: number of discrete actions in the environment.
        """
        self.render = False
        self.load_model = False
        # environment settings
        self.state_size = (84, 84, 4)  # four stacked 84x84 grayscale frames
        self.action_size = action_size
        # parameters about epsilon
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        # Linear epsilon annealing per step over exploration_steps.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        # parameters about training
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        self.memory = deque(maxlen=400000)  # replay buffer
        self.no_op_steps = 30
        # build
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE: rebinds the name — from here on self.optimizer is the training
        # op returned by the optimizer() method, shadowing the method itself.
        self.optimizer = self.optimizer()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_ddqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            self.model.load_weights("./save_model/breakout_ddqn.h5")

    # if the error is in [-1, 1], then the cost is quadratic to the error
    # But outside the interval, the cost is linear to the error
Example 37
Project: reinforcement-learning   Author: rlcode   File: breakout_dqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """DQN agent for Breakout: builds the online and target networks, the
        custom training op, and TensorBoard summaries.

        :param action_size: number of discrete actions in the environment.
        """
        self.render = False
        self.load_model = False
        # environment settings
        self.state_size = (84, 84, 4)  # four stacked 84x84 grayscale frames
        self.action_size = action_size
        # parameters about epsilon
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        # Linear epsilon annealing per step over exploration_steps.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        # parameters about training
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        self.memory = deque(maxlen=400000)  # replay buffer
        self.no_op_steps = 30
        # build model
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE: rebinds the name — from here on self.optimizer is the training
        # op returned by the optimizer() method, shadowing the method itself.
        self.optimizer = self.optimizer()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_dqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            self.model.load_weights("./save_model/breakout_dqn.h5")

    # if the error is in [-1, 1], then the cost is quadratic to the error
    # But outside the interval, the cost is linear to the error
Example 38
Project: reinforcement-learning   Author: rlcode   File: play_dqn_model.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Play-only DQN agent: the Q-network plus a TF session for inference."""
        # Environment settings: four stacked 84x84 frames.
        self.state_size = (84, 84, 4)
        self.action_size = action_size
        self.no_op_steps = 20

        # Q-network used for greedy action selection.
        self.model = self.build_model()

        # Attach an interactive session to Keras and initialise variables.
        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max = 0
        self.avg_loss = 0
        self.sess.run(tf.global_variables_initializer())
Example 39
Project: reinforcement-learning   Author: rlcode   File: breakout_dueling_ddqn.py    MIT License 5 votes vote down vote up
def __init__(self, action_size):
        """Dueling double-DQN agent for Breakout: builds the online and target
        networks, the custom training op, and TensorBoard summaries.

        :param action_size: number of discrete actions in the environment.
        """
        self.render = False
        self.load_model = False
        # environment settings
        self.state_size = (84, 84, 4)  # four stacked 84x84 grayscale frames
        self.action_size = action_size
        # parameters about epsilon
        self.epsilon = 1.
        self.epsilon_start, self.epsilon_end = 1.0, 0.1
        self.exploration_steps = 1000000.
        # Linear epsilon annealing per step over exploration_steps.
        self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
                                  / self.exploration_steps
        # parameters about training
        self.batch_size = 32
        self.train_start = 50000
        self.update_target_rate = 10000
        self.discount_factor = 0.99
        self.memory = deque(maxlen=400000)  # replay buffer
        self.no_op_steps = 30
        # build
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

        # NOTE: rebinds the name — from here on self.optimizer is the training
        # op returned by the optimizer() method, shadowing the method itself.
        self.optimizer = self.optimizer()

        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)

        self.avg_q_max, self.avg_loss = 0, 0
        self.summary_placeholders, self.update_ops, self.summary_op = \
            self.setup_summary()
        self.summary_writer = tf.summary.FileWriter(
            'summary/breakout_dueling_ddqn', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

        if self.load_model:
            # NOTE(review): "ddqb" looks like a typo for "ddqn" — confirm the
            # actual filename on disk before changing it.
            self.model.load_weights("./save_model/breakout_dueling_ddqb.h5")

    # if the error is in [-1, 1], then the cost is quadratic to the error
    # But outside the interval, the cost is linear to the error
Example 40
Project: backdoor   Author: bolunwang   File: utils_backdoor.py    MIT License 5 votes vote down vote up
def fix_gpu_memory(mem_fraction=1):
    """Create, initialise and register a TF session whose GPU memory use is
    capped at mem_fraction of the card, with on-demand growth enabled.

    :returns: the newly created tf.Session (also set as the Keras session).
    """
    import keras.backend as K

    tf_config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction))
    tf_config.gpu_options.allow_growth = True
    tf_config.log_device_placement = False
    tf_config.allow_soft_placement = True

    sess = tf.Session(config=tf_config)
    # Initialise any variables already present in the default graph.
    sess.run(tf.global_variables_initializer())
    K.set_session(sess)
    return sess
Example 41
Project: RLContinuousActionSpace   Author: hchkaiban   File: Models.py    MIT License 5 votes vote down vote up
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        """DDPG critic: main and target networks plus the action-gradient op.

        :param sess: TensorFlow session shared with the actor.
        :param state_size: dimensionality of the state input.
        :param action_size: dimensionality of the action input.
        :param BATCH_SIZE: minibatch size used during training.
        :param TAU: soft target-update rate.
        :param LEARNING_RATE: critic learning rate.
        """
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE
        self.action_size = action_size

        K.set_session(sess)

        # Now create the main and target critic networks.
        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)
        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)
        # dQ/da — gradient of the critic output w.r.t. the action, used for the
        # deterministic policy-gradient update of the actor.
        self.action_grads = tf.gradients(self.model.output, self.action)
        # Fix: tf.initialize_all_variables() has been deprecated since TF 1.0;
        # tf.global_variables_initializer() is the drop-in replacement.
        self.sess.run(tf.global_variables_initializer())
Example 42
Project: SparseSC   Author: microsoft   File: match_space.py    MIT License 5 votes vote down vote up
def keras_reproducible(seed=1234, verbose=0, TF_CPP_MIN_LOG_LEVEL='3'):
    """Seed Python/NumPy/TF RNGs and pin TF to a single thread so Keras runs
    are reproducible; optionally silence TF logging when verbose == 0."""
    import random
    random.seed(seed)
    np.random.seed(seed)
    # https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
    import os
    os.environ['PYTHONHASHSEED'] = '0'  # might need to do this outside the script

    quiet = (verbose == 0)
    if quiet:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = TF_CPP_MIN_LOG_LEVEL  # 2 will print warnings

    import tensorflow
    if quiet:
        # Silence deprecation spam; module path moved across TF releases.
        # https://github.com/tensorflow/tensorflow/issues/27023
        try:
            from tensorflow.python.util import deprecation
            deprecation._PRINT_DEPRECATION_WARNINGS = False
        except ImportError:
            try:
                from tensorflow.python.util import module_wrapper as deprecation
            except ImportError:
                from tensorflow.python.util import deprecation_wrapper as deprecation
            deprecation._PER_MODULE_WARNING_LIMIT = 0
        tensorflow.compat.v1.logging.set_verbosity(tensorflow.compat.v1.logging.ERROR)

    # Single-threaded ops avoid nondeterminism from parallel reductions.
    session_conf = tensorflow.ConfigProto(
        intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    with capture_all() as _:  # doesn't have quiet option
        from keras import backend as K
    tensorflow.set_random_seed(seed)
    K.set_session(tensorflow.Session(
        graph=tensorflow.get_default_graph(), config=session_conf))
Example 43
Project: Deep-Learning-for-Computer-Vision   Author: PacktPublishing   File: utils.py    MIT License 5 votes vote down vote up
def limit_mem():
    """Replace the current Keras session with one that grows GPU memory on
    demand instead of grabbing it all up front."""
    K.get_session().close()
    config = K.tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(K.tf.Session(config=config))
Example 44
Project: sisy   Author: qorrect   File: tf_utils.py    Apache License 2.0 5 votes vote down vote up
def setup_tf_session(device):
    """Create a TF session pinned to `device` and install it as the Keras
    session, initialising any variables in the default graph."""
    import tensorflow as tf
    config = tf.ConfigProto()
    if hasattr(config, 'gpu_options'):
        config.allow_soft_placement = True
        config.gpu_options.allow_growth = True
    if is_gpu_device(device):
        # Restrict TF to the single requested GPU.
        config.gpu_options.visible_device_list = str(get_device_idx(device))
    elif is_cpu_device(device):
        # Hide all GPUs so everything runs on CPU.
        config.gpu_options.visible_device_list = ''
    from keras import backend
    session = tf.Session(config=config)
    session.run(tf.global_variables_initializer())
    backend.set_session(session)
Example 45
Project: async-deeprl   Author: dbobrenko   File: agent.py    MIT License 5 votes vote down vote up
def __init__(self, session, action_size, h, w, channels, opt=tf.train.AdamOptimizer(1e-4)):
        """Creates Q-Learning agent
        :param session: tensorflow session
        :param action_size: (int) length of action space
        :param h: (int) input image height
        :param w: (int) input image width
        :param channels: (int) number of image channels
        :param opt: tensorflow optimizer (by default: Adam optimizer)"""
        self.action_size = action_size
        self.opt = opt
        # Global frame counter; assign_add(use_locking=True) suggests it is
        # incremented concurrently by several async workers — confirm with callers.
        self.global_step = tf.Variable(0, name='frame', trainable=False)
        self.frame_inc_op = self.global_step.assign_add(1, use_locking=True)
        K.set_session(session)
        self.sess = session
        # Online Q-network: maps a stack of frames to per-action Q-values.
        with tf.variable_scope('network'):
            self.action = tf.placeholder('int32', [None], name='action')
            self.reward = tf.placeholder('float32', [None], name='reward')
            model, self.state, self.q_values = self._build_model(h, w, channels)
            self.weights = model.trainable_weights
        with tf.variable_scope('optimizer'):
            # Zero all actions, except one that was performed
            action_onehot = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
            # Predict expected future reward for performed action
            q_value = tf.reduce_sum(tf.multiply(self.q_values, action_onehot), reduction_indices=1)
            # Define squared mean loss function: (y - y_)^2
            self.loss = tf.reduce_mean(tf.square(self.reward - q_value))
            # Compute gradients w.r.t. weights
            grads = tf.gradients(self.loss, self.weights)
            # Apply gradient norm clipping
            grads, _ = tf.clip_by_global_norm(grads, 40.)
            grads_vars = list(zip(grads, self.weights))
            self.train_op = opt.apply_gradients(grads_vars)
        # Frozen target network used for stable Q-value bootstrapping.
        with tf.variable_scope('target_network'):
            target_m, self.target_state, self.target_q_values = self._build_model(h, w, channels)
            target_w = target_m.trainable_weights
        with tf.variable_scope('target_update'):
            # Running these ops copies online-network weights into the target net.
            self.target_update = [target_w[i].assign(self.weights[i])
                                  for i in range(len(target_w))]
Example 46
Project: spacy_zh_model   Author: algteam   File: __main__.py    MIT License 5 votes vote down vote up
def set_keras_backend(backend):
    """Switch the Keras backend at runtime; for TensorFlow, also replace the
    session with one that grows GPU memory on demand.

    :param backend: backend name, e.g. "tensorflow" or "theano".
    """
    if K.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        # Reloading keras.backend makes it re-read KERAS_BACKEND.
        importlib.reload(K)
        assert K.backend() == backend
    if backend == "tensorflow":
        K.get_session().close()
        cfg = K.tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        K.set_session(K.tf.Session(config=cfg))
        # NOTE(review): clear_session() after set_session() creates yet another
        # fresh session, which may discard the config set above — confirm this
        # ordering is intended.
        K.clear_session()
Example 47
Project: Meta-Open-World-Learning   Author: howardhsu   File: validate.py    MIT License 5 votes vote down vote up
def pred_evaluate(config):
    """Evaluate an L2AC model on the validation split and dump F1 scores.

    Loads the saved Keras model, swaps in the training representations as the
    embedding table, predicts with top-n voting, and writes weighted/macro/micro
    F1 (with rejection) to <out_dir>/valid.json.

    :param config: dict with keys "db", "out_dir", "model_type", "top_n".
    """
    db = config["db"]  # e.g. "amazon"
    out_dir = config["out_dir"]
    model_type = config["model_type"]
    top_n = config["top_n"]  # e.g. 10
    vote_n = 1  # typically 1, we disable manual vote; when top_n=1, we optionally vote

    data = np.load("../" + db + "/data/valid_50_idx.npz")
    sess = tf.Session()
    K.set_session(sess)
    model_fn = out_dir + "eval.h5"
    model = keras.models.load_model(model_fn)
    # Replace the embedding table with the training representations, padded
    # with zeros for the remaining rows.
    model.get_layer("embedding_1").set_weights([np.vstack([data['train_rep'], np.zeros((95000, 512))])])

    thres = 0.5
    y_pred = l2ac_predict(model, data, top_n, vote_n)

    weighted_f1, _, _ = evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="weighted")
    macro_f1, _, _ = evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="macro")
    micro_f1, _, _ = evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="micro")
    scores = {'weighted_f1': weighted_f1, 'macro_f1': macro_f1, 'micro_f1': micro_f1}

    K.clear_session()
    # Fix: the original used the Python-2-only `print x` statement, which is a
    # SyntaxError under Python 3; the call form works under both 2 and 3.
    print(scores["weighted_f1"])
    with open(out_dir + "valid.json", "w") as fw:
        json.dump(scores, fw)
Example 48
Project: async-rl   Author: coreylynch   File: async_dqn.py    MIT License 5 votes vote down vote up
def main(_):
    """Entry point: build the graph and session, then train or evaluate the
    async DQN depending on FLAGS.testing."""
    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default(), session.as_default():
        K.set_session(session)
        num_actions = get_num_actions()
        graph_ops = build_graph(num_actions)
        saver = tf.train.Saver()

        if FLAGS.testing:
            evaluation(session, graph_ops, saver)
        else:
            train(session, graph_ops, num_actions, saver)
Example 49
Project: async-rl   Author: coreylynch   File: a3c.py    MIT License 5 votes vote down vote up
def main(_):
    """Entry point: create a graph-scoped session, then train or evaluate A3C
    depending on the TRAINING flag."""
    graph = tf.Graph()
    with graph.as_default(), tf.Session() as session:
        K.set_session(session)
        graph_ops = build_graph()
        saver = tf.train.Saver()

        if TRAINING:
            train(session, graph_ops, saver)
        else:
            evaluation(session, graph_ops, saver)
Example 50
Project: ner-bilstm-cnn   Author: rishiabhishek   File: Model.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, word_length, labels, case_embeddings, pos_embedings, word_embeddings,
                 char_embedding,
                 char_case_embedding,
                 rnn_size=275, filters=53, pool_size=3, kernel_size=3, dropout=0.68, leaky_alpha=0.3,
                 learning_rate=0.0105):
        """Store hyper-parameters and embedding tables for the BiLSTM-CNN NER
        model, and bind a growth-enabled TF session to Keras."""
        self.word_length = word_length  # 52

        # Network hyper-parameters.
        self.leaky_alpha = leaky_alpha
        self.dropout = dropout
        self.filters = filters
        self.kernel_size = kernel_size
        self.pool_size = pool_size
        self.rnn_size = rnn_size
        self.learning_rate = learning_rate

        # Embedding tables, normalised to numpy arrays.
        self.labels = np.array(labels)
        self.case_embeddings = np.array(case_embeddings)
        self.pos_embedings = np.array(pos_embedings)
        self.word_embeddings = np.array(word_embeddings)
        self.char_embedding = np.array(char_embedding)
        self.char_case_embedding = np.array(char_case_embedding)

        self.num_labels = self.labels.shape[0]
        self.model = None  # built later

        # GPU config: let TF claim memory incrementally.
        gpu_cfg = K.tf.ConfigProto()
        gpu_cfg.gpu_options.allow_growth = True
        K.set_session(K.tf.Session(config=gpu_cfg))
Example 51
Project: perceptron-benchmark   Author: advboxes   File: func.py    Apache License 2.0 5 votes vote down vote up
def clear_keras_session():
    """Reset Keras/TF state: install a fresh session, clear it, and wipe the
    default graph."""
    print('Clearing keras session.')
    from keras import backend as K
    import tensorflow as tf
    # Touch the current session first so Keras has one to tear down.
    stale = K.get_session()
    fresh = tf.Session()
    K.set_session(fresh)
    K.clear_session()
    tf.reset_default_graph()
    del fresh
Example 52
Project: neural-style-keras   Author: robertomest   File: utils.py    MIT License 5 votes vote down vote up
def config_gpu(gpu, allow_growth):
    """Install a Keras TF session configured for the requested GPU(s).

    :param gpu: '-1' forces CPU-only; '' or 'all' exposes every GPU; otherwise
        a visible_device_list string such as '0' or '0,1'.
    :param allow_growth: if truthy, allocate GPU memory on demand.
    """
    # Choosing gpu
    if gpu == '-1':
        # Hide GPUs entirely.
        config = tf.ConfigProto(device_count={'GPU': 0})
    else:
        if gpu in ('all', ''):
            gpu = ''
        config = tf.ConfigProto()
        config.gpu_options.visible_device_list = gpu
    # Idiom fix: test truthiness instead of comparing `== True`.
    if allow_growth:
        config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)
Example 53
Project: yolov3-3dcarbox   Author: zoujialong9012   File: yolo3d2process19.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Build the YOLOv3 3D-box detector: resolve model/anchor/class paths,
        create a dedicated graph and session, and materialise the output
        tensors via generate(). Commented-out lines are the original author's
        experiments with alternative checkpoints, preserved as-is."""
#        self.model_path = 'model_data/yolo.h5' # model path or trained weights path
        #self.model_path = 'logs/000/ep003-loss15.711-val_loss14.994.h5' # model path or trained weights path
        self.model_path = 'logs/000/trained_weights_final.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_stage_1.h5' # model path or trained weights path
       # self.model_path = 'logs/000/trained_weights_stage_70.h5' # model path or trained weights path
    #    self.model_path3d = '/home/cidi/dl/3dobject/3D-Deepbox/model'
        #self.anchors_path = 'model_data/yolo_anchors.txt'
        self.anchors_path = 'model_data/kitti_yolo_anchors.txt'
        #self.classes_path = 'model_data/coco_classes.txt'
        self.classes_path = 'model_data/kitti_classes.txt'
        # Dedicated graph keeps this detector isolated from other TF models.
        self.graph = tf.Graph()
        self.score = 0.3   # detection score threshold
        self.iou = 0.45    # NMS IoU threshold
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

        with self.graph.as_default():
            print('tf session tf graph')
            self.sess = tf.Session(graph = self.graph)#K.get_session()
    #    self.sess = K.get_session()
        #K.set_session(self.sess)
        #self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.model_image_size = (384, 960) # fixed size or (None, None), hw
        with self.sess.as_default():
            with self.graph.as_default():
                self.boxes, self.scores, self.classes, self.boxdim, self.box3dconf, self.boxorient = self.generate()
Example 54
Project: yolov3-3dcarbox   Author: zoujialong9012   File: yolo3d2processrosmerge.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Build the YOLOv3 3D-box detector (ROS merge variant): resolve paths,
        create a dedicated graph and session, and materialise output tensors via
        generate(). The commented-out block is a disabled 3D-Deepbox loader kept
        from the original author's experiments."""
        # NOTE(review): tfconfig is currently unused — the session created below
        # does not receive it (it fed the commented-out d3sess). Confirm intent.
        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True
    
        #graph=tf.Graph()
#        self.d3sess = tf.Session(config=tfconfig) #, graph=graph)
#        init = tf.global_variables_initializer()
#        self.d3sess.run(init)
        #with sess.as_default():
        #    with graph.as_default():
#        self.d3dimension, self.d3orientation, self.d3confidence, self.d3loss, self.d3optimizer, self.d3loss_d, self.d3loss_o, self.d3loss_c = build_model()
#        saver = tf.train.Saver()
#        model_file=tf.train.latest_checkpoint('/home/cidi/dl/3dobject/3D-Deepbox/model')
#        saver.restore(self.d3sess, model_file)
#        self.model_path = 'model_data/yolo.h5' # model path or trained weights path
        #self.model_path = 'logs/000/ep018-loss36.945-val_loss34.935.h5' #trained_weights_final.h5 # model path or trained weights path
        self.model_path = 'logs/000/trained_weights_final.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_stage_1.h5' # model path or trained weights path
      #  self.model_path = 'logs/000/trained_weights_stage_70.h5' # model path or trained weights path
    #    self.model_path3d = '/home/cidi/dl/3dobject/3D-Deepbox/model'
        #self.anchors_path = 'model_data/yolo_anchors.txt'
        self.anchors_path = 'model_data/kitti_yolo_anchors.txt'
        #self.classes_path = 'model_data/coco_classes.txt'
        self.classes_path = 'model_data/kitti_classes.txt'
        # Dedicated graph keeps this detector isolated from other TF models.
        self.graph = tf.Graph()
        self.score = 0.3   # detection score threshold
        self.iou = 0.45    # NMS IoU threshold
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

        with self.graph.as_default():
            print('tf session tf graph')
            self.sess = tf.Session(graph = self.graph)#K.get_session()
    #    self.sess = K.get_session()
        #K.set_session(self.sess)
        #self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.model_image_size = (384, 960) # fixed size or (None, None), wh
        with self.sess.as_default():
            with self.graph.as_default():
                self.boxes, self.scores, self.classes, self.boxdim, self.box3dconf, self.boxorient = self.generate()
Example 55
Project: yolov3-3dcarbox   Author: zoujialong9012   File: yolo3d2processros.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Build the combined detector (ROS variant): restore the 3D-Deepbox
        model into its own session, then create a separate graph/session for the
        YOLOv3 detector and materialise its output tensors via generate()."""
        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True
    
        #graph=tf.Graph()
        self.d3sess = tf.Session(config=tfconfig) #, graph=graph)
        # NOTE(review): variables are initialised before build_model() creates
        # them; the saver.restore below supplies the actual weights — confirm
        # this ordering is intentional.
        init = tf.global_variables_initializer()
        self.d3sess.run(init)
        #with sess.as_default():
        #    with graph.as_default():
        self.d3dimension, self.d3orientation, self.d3confidence, self.d3loss, self.d3optimizer, self.d3loss_d, self.d3loss_o, self.d3loss_c = build_model()
        saver = tf.train.Saver()
        model_file=tf.train.latest_checkpoint('/home/cidi/dl/3dobject/3D-Deepbox/model')
        saver.restore(self.d3sess, model_file)
 
#        self.model_path = 'model_data/yolo.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_final.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_stage_1.h5' # model path or trained weights path
        self.model_path = 'logs/000/trained_weights_stage_70.h5' # model path or trained weights path
    #    self.model_path3d = '/home/cidi/dl/3dobject/3D-Deepbox/model'
        self.anchors_path = 'model_data/yolo_anchors.txt'
        #self.classes_path = 'model_data/coco_classes.txt'
        self.classes_path = 'model_data/kitti_classes.txt'
        # Dedicated graph keeps the YOLO detector isolated from the 3D model.
        self.graph = tf.Graph()
        self.score = 0.3   # detection score threshold
        self.iou = 0.45    # NMS IoU threshold
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

        with self.graph.as_default():
            print('tf session tf graph')
            self.sess = tf.Session(graph = self.graph)#K.get_session()
    #    self.sess = K.get_session()
        #K.set_session(self.sess)
        #self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.model_image_size = (960, 384) # fixed size or (None, None), hw
        with self.sess.as_default():
            with self.graph.as_default():
                self.boxes, self.scores, self.classes = self.generate()
Example 56
Project: yolov3-3dcarbox   Author: zoujialong9012   File: yolo3d2processvideo19.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Build the YOLOv3 3D-box detector (video variant): resolve paths,
        create a dedicated graph and session, and materialise output tensors via
        generate(). The commented-out block is a disabled 3D-Deepbox loader kept
        from the original author's experiments."""
        # NOTE(review): tfconfig is currently unused — the session created below
        # does not receive it (it fed the commented-out d3sess). Confirm intent.
        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True
    
        #graph=tf.Graph()
#        self.d3sess = tf.Session(config=tfconfig) #, graph=graph)
#        init = tf.global_variables_initializer()
#        self.d3sess.run(init)
        #with sess.as_default():
        #    with graph.as_default():
#        self.d3dimension, self.d3orientation, self.d3confidence, self.d3loss, self.d3optimizer, self.d3loss_d, self.d3loss_o, self.d3loss_c = build_model()
#        saver = tf.train.Saver()
#        model_file=tf.train.latest_checkpoint('/home/cidi/dl/3dobject/3D-Deepbox/model')
#        saver.restore(self.d3sess, model_file)
#        self.model_path = 'model_data/yolo.h5' # model path or trained weights path
        #self.model_path = 'logs/000/ep006-loss32.080-val_loss30.400.h5' #trained_weights_final.h5 # model path or trained weights path
        self.model_path = 'logs/000/trained_weights_final.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_stage_1.h5' # model path or trained weights path
      #  self.model_path = 'logs/000/trained_weights_stage_70.h5' # model path or trained weights path
    #    self.model_path3d = '/home/cidi/dl/3dobject/3D-Deepbox/model'
        #self.anchors_path = 'model_data/yolo_anchors.txt'
        self.anchors_path = 'model_data/kitti_yolo_anchors.txt'
        #self.classes_path = 'model_data/coco_classes.txt'
        self.classes_path = 'model_data/kitti_classes.txt'
        # Dedicated graph keeps this detector isolated from other TF models.
        self.graph = tf.Graph()
        self.score = 0.3   # detection score threshold
        self.iou = 0.45    # NMS IoU threshold
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

        with self.graph.as_default():
            print('tf session tf graph')
            self.sess = tf.Session(graph = self.graph)#K.get_session()
    #    self.sess = K.get_session()
        #K.set_session(self.sess)
        #self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.model_image_size = (384, 960) # fixed size or (None, None), hw
        with self.sess.as_default():
            with self.graph.as_default():
                self.boxes, self.scores, self.classes, self.boxdim, self.box3dconf, self.boxorient = self.generate()
Example 57
Project: yolov3-3dcarbox   Author: zoujialong9012   File: yolo3d2processvideo.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Build the combined detector (video variant): restore the 3D-Deepbox
        model into its own session, then create a separate graph/session for the
        YOLOv3 detector and materialise its output tensors via generate()."""
        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True
    
        #graph=tf.Graph()
        self.d3sess = tf.Session(config=tfconfig) #, graph=graph)
        # NOTE(review): variables are initialised before build_model() creates
        # them; the saver.restore below supplies the actual weights — confirm
        # this ordering is intentional.
        init = tf.global_variables_initializer()
        self.d3sess.run(init)
        #with sess.as_default():
        #    with graph.as_default():
        self.d3dimension, self.d3orientation, self.d3confidence, self.d3loss, self.d3optimizer, self.d3loss_d, self.d3loss_o, self.d3loss_c = build_model()
        saver = tf.train.Saver()
        model_file=tf.train.latest_checkpoint('/home/cidi/dl/3dobject/3D-Deepbox/model')
        saver.restore(self.d3sess, model_file)
 
#        self.model_path = 'model_data/yolo.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_final.h5' # model path or trained weights path
        #self.model_path = 'logs/000/trained_weights_stage_1.h5' # model path or trained weights path
        self.model_path = 'logs/000/trained_weights_stage_70back.h5' # model path or trained weights path
    #    self.model_path3d = '/home/cidi/dl/3dobject/3D-Deepbox/model'
        self.anchors_path = 'model_data/yolo_anchors.txt'
        #self.classes_path = 'model_data/coco_classes.txt'
        self.classes_path = 'model_data/kitti_classes.txt'
        # Dedicated graph keeps the YOLO detector isolated from the 3D model.
        self.graph = tf.Graph()
        self.score = 0.3   # detection score threshold
        self.iou = 0.45    # NMS IoU threshold
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()

        with self.graph.as_default():
            print('tf session tf graph')
            self.sess = tf.Session(graph = self.graph)#K.get_session()
    #    self.sess = K.get_session()
        #K.set_session(self.sess)
        #self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.model_image_size = (960, 384) # fixed size or (None, None), hw
        with self.sess.as_default():
            with self.graph.as_default():
                self.boxes, self.scores, self.classes = self.generate()
Example 58
Project: Voiceprint-Recognition   Author: SunYanCN   File: kws.py    Apache License 2.0 5 votes vote down vote up
def train_cnn(selected_lable):
    """Train the keyword-spotting CNN on the preprocessed training set, then
    export a d-vector embedding model ("dvector.h5") cut at its flatten layer.

    :param selected_lable: list of label names defining the classes to train.
    """
    # Let TF claim GPU memory incrementally.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=tf_config))

    dest = app.config['PROCESSED_TRAIN_FOLDER']
    x_train = np.load(os.path.join(dest, 'train_data.npy'), allow_pickle=True)
    y_train = np.load(os.path.join(dest, 'train_label.npy'), allow_pickle=True)

    epochs = 80
    batch_size = 64
    num_type = len(selected_lable)

    x_train = proress(x_train)
    # word label -> number label -> one-hot.
    y_train = label_to_category(y_train, selected_lable)
    y_train = keras.utils.to_categorical(y_train, num_type)

    # Shuffle samples and labels in lockstep.
    order = np.random.permutation(x_train.shape[0])
    x_train = x_train[order, :]
    y_train = y_train[order]

    history = train(x_train, y_train, type=num_type, batch_size=batch_size,
                    epochs=epochs, labels=selected_lable)

    # Reload the best checkpoint and truncate it at the flatten layer to get
    # the embedding (d-vector) extractor.
    model = load_model(model_path, custom_objects={'f1': f1, 'amsoftmax_loss': amsoftmax_loss})
    targetModel = Model(inputs=model.input, outputs=model.get_layer("flatten").output)
    targetModel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    targetModel.save("dvector.h5")

    del model
    del targetModel
    K.clear_session()
Example 59
Project: training_results_v0.6   Author: mlperf   File: test_keras.py    Apache License 2.0 5 votes vote down vote up
def test_load_model(self):
        """hvd.load_model should restore a model whose optimizer is the
        horovod-wrapped RMSprop with matching hyper-parameters and weights."""
        hvd.init()

        with self.test_session() as sess:
            K.set_session(sess)

            optimizer = hvd.DistributedOptimizer(keras.optimizers.RMSprop(lr=0.0001))

            model = keras.models.Sequential([
                keras.layers.Dense(2, input_shape=(3,)),
                keras.layers.RepeatVector(3),
                keras.layers.TimeDistributed(keras.layers.Dense(3)),
            ])
            model.compile(loss=keras.losses.MSE,
                          optimizer=optimizer,
                          metrics=[keras.metrics.categorical_accuracy],
                          sample_weight_mode='temporal')

            # One training step so the optimizer accumulates state worth saving.
            model.train_on_batch(np.random.random((1, 3)),
                                 np.random.random((1, 3, 3)))

            _, fname = tempfile.mkstemp('.h5')
            model.save(fname)
            restored = hvd.load_model(fname)
            restored_opt = restored.optimizer
            os.remove(fname)

            self.assertEqual(type(restored_opt).__module__, 'horovod.keras')
            self.assertEqual(type(restored_opt).__name__, 'RMSprop')
            self.assertEqual(K.get_value(optimizer.lr), K.get_value(restored_opt.lr))
            self.assertEqual(len(optimizer.get_weights()), len(restored_opt.get_weights()))
            for saved_w, restored_w in zip(optimizer.get_weights(),
                                           restored_opt.get_weights()):
                self.assertListEqual(saved_w.tolist(), restored_w.tolist())
Example 60
Project: training_results_v0.6   Author: mlperf   File: test_keras.py    Apache License 2.0 5 votes vote down vote up
def test_load_model_custom_optimizers(self):
        """Save/load round-trip for a model built with a user-defined
        optimizer subclass, restored by passing the class itself through the
        ``custom_optimizers`` argument of ``hvd.load_model``.
        """
        hvd.init()

        class TestOptimizer(keras.optimizers.RMSprop):
            def __init__(self, **kwargs):
                super(TestOptimizer, self).__init__(**kwargs)

        with self.test_session() as session:
            K.set_session(session)

            optimizer = hvd.DistributedOptimizer(TestOptimizer(lr=0.0001))

            net = keras.models.Sequential([
                keras.layers.Dense(2, input_shape=(3,)),
                keras.layers.RepeatVector(3),
                keras.layers.TimeDistributed(keras.layers.Dense(3)),
            ])
            net.compile(loss=keras.losses.MSE,
                        optimizer=optimizer,
                        metrics=[keras.metrics.categorical_accuracy],
                        sample_weight_mode='temporal')

            # One batch so optimizer slot variables exist before saving.
            net.train_on_batch(np.random.random((1, 3)),
                               np.random.random((1, 3, 3)))

            _, save_path = tempfile.mkstemp('.h5')
            net.save(save_path)

            restored = hvd.load_model(save_path,
                                      custom_optimizers=[TestOptimizer])
            restored_opt = restored.optimizer
            os.remove(save_path)

            self.assertEqual(type(restored_opt).__module__, 'horovod.keras')
            self.assertEqual(type(restored_opt).__name__, 'TestOptimizer')
            self.assertEqual(K.get_value(optimizer.lr),
                             K.get_value(restored_opt.lr))
            self.assertEqual(len(optimizer.get_weights()),
                             len(restored_opt.get_weights()))
            for saved_w, loaded_w in zip(optimizer.get_weights(),
                                         restored_opt.get_weights()):
                self.assertListEqual(saved_w.tolist(), loaded_w.tolist())
Example 61
Project: rl_puyopuyo   Author: frostburn   File: mp_train.py    MIT License 5 votes vote down vote up
def worker(global_weights, work_queue, result_queue):
    """Evaluation worker for evolution-strategy style training.

    Repeatedly pulls a seed from ``work_queue``, perturbs the shared flat
    weight vector by a seeded random delta, loads the result into a
    CPU-only TF session, and reports the agent's score on ``result_queue``.
    A ``None`` seed acts as the shutdown sentinel.
    """
    register()
    with tf.device("/cpu:0"):
        with tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})) as session:
            K.set_session(session)
            agent = get_simple_agent(session, 4)
            session.run(tf.global_variables_initializer())

            # Build one placeholder/assign pair per trainable variable so a
            # flat weight vector can be scattered back into the graph.
            trainables = tf.trainable_variables()
            shapes = []
            placeholders = []
            assign_ops = []
            for current_values, variable in zip(session.run(trainables),
                                                trainables):
                ph = tf.placeholder(tf.float32, current_values.shape)
                placeholders.append(ph)
                assign_ops.append(variable.assign(ph))
                shapes.append(current_values.shape)

            while True:
                seed = work_queue.get()
                if seed is None:
                    # Poison pill: acknowledge and shut the worker down.
                    work_queue.task_done()
                    return

                # Deterministic perturbation of the shared weight vector.
                rng = np.random.RandomState(seed)
                flat = np.array(global_weights)
                flat += rng.randn(*flat.shape) * 10

                # Slice the flat vector back into per-variable tensors.
                feed_dict = {}
                for shape, ph in zip(shapes, placeholders):
                    count = np.prod(shape)
                    feed_dict[ph] = flat[:count].reshape(shape)
                    flat = flat[count:]
                session.run(assign_ops, feed_dict=feed_dict)
                agent.reset()

                score = agent_performance(agent, 100)

                result_queue.put((seed, score))
                work_queue.task_done()
Example 62
Project: vergeml   Author: vergeml   File: libraries.py    MIT License 5 votes vote down vote up
def setup(env):
        """Import Keras with its import-time banner suppressed, then wire up
        the TensorFlow backend.

        Keras writes a "Using ... backend" notice to stderr on import; this
        temporarily redirects stderr to os.devnull to keep CLI output clean,
        then (for the TensorFlow backend only) installs a configured session.

        Fixes over the original: the devnull handle is now closed instead of
        leaked, and the pointless ``except Exception as e: raise e`` clause
        (a bare re-raise) has been removed — ``finally`` already guarantees
        stderr restoration on failure.
        """
        stderr = sys.stderr
        devnull = open(os.devnull, "w")
        sys.stderr = devnull
        # pylint: disable=W0612
        try:
            import keras
        finally:
            # Always restore stderr and close the sink, even if import fails.
            sys.stderr = stderr
            devnull.close()

        from keras import backend as K
        if K.backend() == 'tensorflow':
            TensorFlowLibrary.setup(env)
            K.set_session(TensorFlowLibrary.create_session(env))
Example 63
Project: Facial_Expression_Recognition   Author: XC-Li   File: transfer_learning.py    MIT License 5 votes vote down vote up
def ft_mobile_net():
    """
    Use pretrained mobile net as bottom

    Builds a 7-class classifier by stacking dense layers
    (1024 -> 256 -> 64 -> 16 -> 7 softmax) on a frozen ImageNet MobileNet.

    NOTE(review): the session/optimizer handling below appears broken — the
    trailing comment in this function records a FailedPreconditionError for
    an uninitialized optimizer slot variable. The model is compiled inside
    one tf.Session, but K.set_session() then installs a *new* session, so
    the Adam variables initialized in `sess` are presumably not initialized
    in the session Keras actually uses. Verify before relying on this.

    :return: compiled Keras Model (see caveat above)
    """
    # input = layers.Input((224,224,3), name='RGB')
    base_model = mobilenet.MobileNet(include_top=False, weights='imagenet', input_shape=(224, 224,3))
    base_out = base_model.output
    x = layers.Flatten()(base_out)
    x = layers.Dense(1024, activation='relu', kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense1024')(x)
    x = layers.Dense(256, activation='relu',kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense256')(x)
    x = layers.Dense(64, activation='relu',kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense64')(x)
    x = layers.Dense(16, activation='relu',kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense16')(x)
    predictions = layers.Dense(7, activation='softmax',kernel_initializer='random_uniform', bias_initializer='zeros')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    # Freeze the pretrained backbone; only the new dense head trains.
    for layer in base_model.layers:
        layer.trainable = False
    # `sgd` is unused here; kept for the commented-out alternative compile below.
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    with tf.Session() as sess:
        adam = tf.train.AdamOptimizer()
        print(adam.variables())
        # Initialize Adam's slot variables in *this* session only.
        sess.run(tf.variables_initializer(adam.variables()))
        model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=[metrics.categorical_accuracy])
        # model.compile(optimizer=sgd,
        #               loss='categorical_crossentropy',
        #               metrics=[tf.keras.metrics.categorical_accuracy])
        # Installs a brand-new session for Keras — separate from `sess` above.
        K.set_session(tf.Session(graph=model.output.graph))
        init = K.tf.global_variables_initializer()
        K.get_session().run(init)

    # tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value training/TFOptimizer/beta1_power
        return model
Example 64
Project: Facial_Expression_Recognition   Author: XC-Li   File: transferlearning_models.py    MIT License 5 votes vote down vote up
def ft_mobile_net():
    """
    Use pretrained mobile net as bottom

    Builds a 7-class classifier by stacking dense layers
    (1024 -> 256 -> 64 -> 16 -> 7 softmax) on a frozen ImageNet MobileNet.

    NOTE(review): duplicate of the same function in transfer_learning.py,
    with the same known issue — the trailing comment records a
    FailedPreconditionError for an uninitialized optimizer slot variable,
    because K.set_session() installs a new session after the Adam variables
    were initialized in a different one. Verify before relying on this.

    :return: compiled Keras Model (see caveat above)
    """
    # input = layers.Input((224,224,3), name='RGB')
    base_model = mobilenet.MobileNet(include_top=False, weights='imagenet', input_shape=(224, 224,3))
    base_out = base_model.output
    x = layers.Flatten()(base_out)
    x = layers.Dense(1024, activation='relu', kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense1024')(x)
    x = layers.Dense(256, activation='relu',kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense256')(x)
    x = layers.Dense(64, activation='relu',kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense64')(x)
    x = layers.Dense(16, activation='relu',kernel_initializer='random_uniform', bias_initializer='zeros', name='Dense16')(x)
    predictions = layers.Dense(7, activation='softmax',kernel_initializer='random_uniform', bias_initializer='zeros')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    # Freeze the pretrained backbone; only the new dense head trains.
    for layer in base_model.layers:
        layer.trainable = False
    # `sgd` is unused here; kept for the commented-out alternative compile below.
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    with tf.Session() as sess:
        adam = tf.train.AdamOptimizer()
        print(adam.variables())
        # Initialize Adam's slot variables in *this* session only.
        sess.run(tf.variables_initializer(adam.variables()))
        model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=[metrics.categorical_accuracy])
        # model.compile(optimizer=sgd,
        #               loss='categorical_crossentropy',
        #               metrics=[tf.keras.metrics.categorical_accuracy])
        # Installs a brand-new session for Keras — separate from `sess` above.
        K.set_session(tf.Session(graph=model.output.graph))
        init = K.tf.global_variables_initializer()
        K.get_session().run(init)

    # tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value training/TFOptimizer/beta1_power
        return model
Example 65
Project: CONSNet — Author: MICLab-Unicamp — File: cnn_utils.py — License: MIT — 5 votes
def limit_mem():
    """Replace the current Keras/TF session with one that grows GPU memory
    on demand instead of pre-reserving all of it.

    Bug fix: the original first line was ``K.tf.Session.close`` — a bare
    attribute reference that calls nothing, so the old session (and its GPU
    memory) was never released. Close the live backend session via
    ``K.get_session()`` instead, matching the working variant of this
    helper elsewhere in this collection.
    """
    K.get_session().close()
    cfg = K.tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    K.set_session(K.tf.Session(config=cfg))
Example 66
Project: Deepbinner   Author: rrwick   File: classify.py    GNU General Public License v3.0 5 votes vote down vote up
def set_tensorflow_threads(args):
    """Constrain TensorFlow's CPU parallelism from parsed command-line
    arguments and install the resulting session as the Keras backend
    session."""
    os.environ['OMP_NUM_THREADS'] = str(args.omp_num_threads)
    thread_config = tf.ConfigProto(
        intra_op_parallelism_threads=args.intra_op_parallelism_threads,
        inter_op_parallelism_threads=args.inter_op_parallelism_threads,
        allow_soft_placement=True,
        device_count={'CPU': args.device_count})
    backend.set_session(tf.Session(config=thread_config))
Example 67
Project: DeepLearningForComputerVision   Author: rajacheers   File: utils.py    Apache License 2.0 5 votes vote down vote up
def limit_mem():
    """Tear down the current Keras session and start a fresh one whose GPU
    memory allocation grows on demand rather than being pre-reserved."""
    K.get_session().close()
    growth_cfg = K.tf.ConfigProto()
    growth_cfg.gpu_options.allow_growth = True
    K.set_session(K.tf.Session(config=growth_cfg))
Example 68
Project: retain-keras   Author: Optum   File: retain_evaluation.py    Apache License 2.0 5 votes vote down vote up
def import_model(path):
    """Import model from given path and assign it to appropriate devices"""
    # Start from a clean graph, then install a soft-placement session with
    # on-demand GPU memory growth.
    K.clear_session()
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=session_config))
    # The saved model contains custom layers that must be supplied at load time.
    model = load_model(path,
                       custom_objects={'FreezePadding': FreezePadding,
                                       'FreezePadding_Non_Negative': FreezePadding_Non_Negative})
    # Wrap in a data-parallel model when more than one GPU is available.
    if len(get_available_gpus()) > 1:
        model = make_parallel(model)
    return model
Example 69
Project: retain-keras   Author: Optum   File: retain_interpretations.py    Apache License 2.0 5 votes vote down vote up
def import_model(path):
    """Import model from given path and assign it to appropriate devices"""
    # Reset the backend graph and install a fresh soft-placement session.
    K.clear_session()
    cfg = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    cfg.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=cfg))
    base = load_model(path,
                      custom_objects={'FreezePadding': FreezePadding,
                                      'FreezePadding_Non_Negative': FreezePadding_Non_Negative})
    # Expose the attention tensors alongside the normal outputs so callers
    # can inspect per-timestep contributions.
    attention_outputs = [base.get_layer(name='softmax_1').output,
                         base.get_layer(name='beta_dense_0').output]
    model_with_attention = Model(base.inputs, base.outputs + attention_outputs)
    return base, model_with_attention
Example 70
Project: Learning-to-navigate-without-a-map   Author: ToniRV   File: ddpg.py    MIT License 5 votes vote down vote up
def __init__(self, sess, state_size, action_size, batch_size,
                 tau, learning_rate):
        """Init actor network.

        Builds the actor and its target copy, plus the deterministic policy
        gradient update op: gradients of the actor output w.r.t. its
        weights, weighted by the (negated) action gradients supplied by the
        critic, applied via Adam.

        Parameters
        ----------
        sess : tf.Session
            session shared with the critic / training loop
        state_size : tuple
            size of the state size
        action_size : int
            dimensionality of the action output
        batch_size : int
            minibatch size used by the training loop
        tau : float
            soft-update rate for the target network
        learning_rate : float
            Adam step size for the policy update
        """
        self.sess = sess
        self.batch_size = batch_size
        self.tau = tau
        self.learning_rate = learning_rate
        self.action_size = action_size

        K.set_session(sess)

        # create model and its slowly-tracking target copy
        self.model, self.weights, self.state = \
            self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = \
            self.create_actor_network(state_size, action_size)
        # dL/da fed in by the critic; negated so apply_gradients *ascends*
        # the expected return.
        self.action_gradient = tf.placeholder(tf.float32, [None, action_size])
        self.params_grad = \
            tf.gradients(self.model.output, self.weights,
                         -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = \
            tf.train.AdamOptimizer(learning_rate).apply_gradients(grads)
        # Fix: tf.initialize_all_variables() is long-deprecated; use the
        # behavior-identical modern initializer (as the other examples in
        # this file already do).
        self.sess.run(tf.global_variables_initializer())
Example 71
Project: neural-fingerprinting — Author: StephanZheng — File: attacks.py — License: BSD 3-Clause "New"/"Revised" — 4 votes
def main(args):
    """Craft adversarial examples against a pre-trained model.

    Validates the dataset/attack arguments, loads the model trained by
    train_model.py, reports clean test accuracy, then runs the requested
    attack(s) via craft_one_type and saves the results under PATH_DATA.
    """
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all', 'cw-lid'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma', 'cw-l2', 'all' or 'cw-lid' for attacking LID detector"
    model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
    # model_file = "../data_v1/model_%s.h5" % args.dataset
    print(model_file)
    assert os.path.isfile(model_file), \
        'model file not found... must first train model using train_model.py.'
    if args.dataset == 'svhn' and args.attack == 'cw-l2':
        assert args.batch_size == 16, \
        "svhn has 26032 test images, the batch_size for cw-l2 attack should be 16, " \
        "otherwise, there will be error at the last batch!"


    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))
    # Create TF session, set it as Keras backend
    sess = tf.Session()
    K.set_session(sess)
    if args.attack == 'cw-l2' or args.attack == 'cw-lid':
        warnings.warn("Important: remove the softmax layer for cw attacks!")
        # use softmax=False to load without softmax layer
        # (CW attacks operate on logits, so the architecture is rebuilt
        # without softmax and only the weights are loaded)
        model = get_model(args.dataset, softmax=False)
        model.compile(
            loss=cross_entropy,
            optimizer='adadelta',
            metrics=['accuracy']
        )
        model.load_weights(model_file)
    else:
        model = load_model(model_file)

    # Sanity check: clean accuracy before attacking.
    _, _, X_test, Y_test = get_data(args.dataset)
    _, acc = model.evaluate(X_test, Y_test, batch_size=args.batch_size,
                            verbose=0)
    print("Accuracy on the test set: %0.2f%%" % (100*acc))

    if args.attack == 'cw-lid': # breaking LID detector - test
        # cw-lid is evaluated on a 1000-sample subset only.
        X_test = X_test[:1000]
        Y_test = Y_test[:1000]

    if args.attack == 'all':
        # Cycle through all attacks
        # NOTE(review): despite the name, this loop currently only runs
        # 'fgsm' — confirm whether the other attacks were disabled on purpose.
        for attack in ['fgsm']:
            craft_one_type(sess, model, X_test, Y_test, args.dataset, attack,
                           args.batch_size)
    else:
        # Craft one specific attack type
        craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack,
                       args.batch_size)
    print('Adversarial samples crafted and saved to %s ' % PATH_DATA)
    sess.close()
Example 72
Project: keras_to_tensorflow   Author: alanswx   File: convertkeras.py    MIT License 4 votes vote down vote up
def convert(prevmodel,export_path,freeze_graph_binary):
   """Convert a saved Keras model into a frozen TensorFlow graph.

   Loads the model, rebuilds it with the learning phase hard-coded to
   inference mode, writes the graph definition and a checkpoint, then
   shells out to TensorFlow's freeze_graph tool to produce
   ``<export_path>.pb``.

   :param prevmodel: path to the saved Keras model (.h5)
   :param export_path: basename for the emitted graph/checkpoint/pb files
   :param freeze_graph_binary: path to the freeze_graph executable
   """

   # open up a Tensorflow session
   sess = tf.Session()
   # tell Keras to use the session
   K.set_session(sess)

   # From this document: https://blog.keras.io/keras-as-a-simplified-interface-to-tensorflow-tutorial.html
   
   # let's convert the model for inference
   K.set_learning_phase(0)  # all new operations will be in test mode from now on
   # serialize the model and get its weights, for quick re-building
   previous_model = load_model(prevmodel)
   previous_model.summary()

   config = previous_model.get_config()
   weights = previous_model.get_weights()

   # re-build a model where the learning phase is now hard-coded to 0
   # NOTE(review): bare `except:` — assumes any failure means the config is
   # for a functional Model rather than a Sequential; it will also mask
   # unrelated errors.
   try:
     model= Sequential.from_config(config) 
   except:
     model= Model.from_config(config) 
   #model= model_from_config(config)
   model.set_weights(weights)

   print("Input name:")
   print(model.input.name)
   print("Output name:")
   print(model.output.name)
   # Strip the ":0" tensor suffix to get the op name freeze_graph expects.
   output_name=model.output.name.split(':')[0]

   #  not sure what this is for
   export_version = 1 # version number (integer)

   graph_file=export_path+"_graph.pb"
   ckpt_file=export_path+".ckpt"
   # create a saver 
   saver = tf.train.Saver(sharded=True)
   tf.train.write_graph(sess.graph_def, '', graph_file)
   save_path = saver.save(sess, ckpt_file)
#~/tensorflow/bazel-bin/tensorflow/python/tools/freeze_graph --input_graph=./graph.pb  --input_checkpoint=./model.ckpt --output_node_names=add_72 --output_graph=frozen.pb
   # NOTE(review): command is built by string concatenation and run through
   # a shell — unsafe if export_path/freeze_graph_binary contain spaces or
   # shell metacharacters; prefer subprocess.run([...]) with a list.
   command = freeze_graph_binary +" --input_graph=./"+graph_file+" --input_checkpoint=./"+ckpt_file+" --output_node_names="+output_name+" --output_graph=./"+export_path+".pb"
   print(command)
   os.system(command)
Example 73
Project: neural-architecture-search   Author: titu1994   File: controller.py    MIT License 4 votes vote down vote up
def get_action(self, state):
        '''
        Gets a one hot encoded action list, either from random sampling or from
        the Controller RNN

        With probability self.exploration a uniformly random action is
        sampled per state dimension (epsilon-greedy exploration); otherwise
        the Controller RNN predicts the actions from the first state.

        Args:
            state: a list of one hot encoded states, whose first value is used as initial
                state for the controller RNN

        Returns:
            A one hot encoded action list
        '''
        if np.random.random() < self.exploration:
            print("Generating random action to explore")
            actions = []

            # One random choice per (layer, state-dimension) pair.
            for i in range(self.state_size * self.num_layers):
                state_ = self.state_space[i]
                size = state_['size']

                sample = np.random.choice(size, size=1)
                # Map the sampled index back to its real value, then encode.
                sample = state_['index_map_'][sample[0]]
                action = self.state_space.embedding_encode(i, sample)
                actions.append(action)
            return actions

        else:
            print("Prediction action from Controller")
            initial_state = self.state_space[0]
            size = initial_state['size']

            # Ensure the RNN input has shape (1, size) and integer dtype.
            if state[0].shape != (1, size):
                state = state[0].reshape((1, size)).astype('int32')
            else:
                state = state[0]

            print("State input to Controller for Action : ", state.flatten())

            # Run the prediction inside the controller's dedicated session.
            with self.policy_session.as_default():
                K.set_session(self.policy_session)

                with tf.name_scope('action_prediction'):
                    pred_actions = self.policy_session.run(self.policy_actions, feed_dict={self.state_input: state})

                return pred_actions
Example 74
Project: neural-architecture-search   Author: titu1994   File: controller.py    MIT License 4 votes vote down vote up
def train_step(self):
        '''
        Perform a single train step on the Controller RNN

        Uses REINFORCE: the most recent state trajectory is re-encoded as
        one-hot policy labels, weighted by the discounted reward, and one
        gradient step is applied inside the controller's session. Also
        writes summaries, checkpoints the controller, and decays the
        exploration rate over time.

        Returns:
            the training loss
        '''
        states = self.state_buffer[-1]
        label_list = []

        # parse the state space to get real value of the states,
        # then one hot encode them for comparison with the predictions
        state_list = self.state_space.parse_state_space_list(states)
        for id, state_value in enumerate(state_list):
            state_one_hot = self.state_space.embedding_encode(id, state_value)
            label_list.append(state_one_hot)

        # the initial input to the controller RNN
        state_input_size = self.state_space[0]['size']
        state_input = states[0].reshape((1, state_input_size)).astype('int32')
        print("State input to Controller for training : ", state_input.flatten())

        # the discounted reward value
        reward = self.discount_rewards()
        reward = np.asarray([reward]).astype('float32')

        feed_dict = {
            self.state_input: state_input,
            self.discounted_rewards: reward
        }

        # prepare the feed dict with the values of all the policy labels for each
        # of the Controller outputs
        for i, label in enumerate(label_list):
            feed_dict[self.policy_labels[i]] = label

        with self.policy_session.as_default():
            K.set_session(self.policy_session)

            print("Training RNN (States ip) : ", state_list)
            print("Training RNN (Reward ip) : ", reward.flatten())
            # Single optimization step; also fetch loss/summaries/step count.
            _, loss, summary, global_step = self.policy_session.run([self.train_op, self.total_loss, self.summaries_op,
                                                                     self.global_step],
                                                                     feed_dict=feed_dict)

            self.summary_writer.add_summary(summary, global_step)
            self.saver.save(self.policy_session, save_path='weights/controller.ckpt', global_step=self.global_step)

            # reduce exploration after many train steps
            # (multiplicative decay every 20 steps, floored at 0.5)
            if global_step != 0 and global_step % 20 == 0 and self.exploration > 0.5:
                self.exploration *= 0.99

        return loss
Example 75
Project: Meta-Open-World-Learning   Author: howardhsu   File: train_DOC.py    MIT License 4 votes vote down vote up
def train(config):
    """Train one DOC classifier per set mode and checkpoint the best model.

    For each entry in config["set_modes"], loads the pre-split training
    data, builds an encoder chosen by config["model_type"] (MLP, BiLSTM,
    or CNN over pretrained embeddings), trains with per-class sigmoid
    outputs + binary cross-entropy (the DOC formulation), and saves the
    best validation checkpoint to out_dir. The Keras session is cleared
    between modes so graphs don't accumulate.
    """
    batch_size=config['batch_size']
    model_type=config['model_type'] #["mlp_256"] #, "lstm_512"]
    set_modes=config["set_modes"] #["test_25", "test_50", "test_75"]
    db=config["db"] # "amazon"
    emb=config["emb"] #"../amazon/data/embedding.npy"
    out_dir=config["out_dir"]
    
    for set_mode in set_modes:
        eval_fn=out_dir+"eval_"+set_mode+".h5"
        data=np.load("../"+db+"/data/"+set_mode+"_idx.npz")
        # MLP consumes dense feature vectors; the others consume token ids
        # plus a pretrained embedding matrix.
        if 'mlp' in model_type:
            train_X=data['train_set_X']
        else:
            train_X=data['train_set_idx_X']
            glove_pre_trained=np.load(emb)
        # One-hot encode the integer labels.
        train_Y=np.zeros((data['train_set_Y'].shape[0], data['train_set_Y'].max()+1), dtype='int16')
        train_Y[np.arange(train_Y.shape[0]), data['train_set_Y'] ]=1

        sess=tf.Session()
        K.set_session(sess)
        if 'mlp' in model_type:
            x=keras.Input(shape=(data['train_set_X'].shape[1],), dtype="float32" )
            x_rep=x
            enc=keras.layers.Dense(256, activation="relu")(x_rep)
        else:
            x=keras.Input(shape=(data['train_set_idx_X'].shape[1],), dtype="int32" )
            # Frozen pretrained embeddings.
            emb_layer=keras.layers.Embedding(glove_pre_trained.shape[0], glove_pre_trained.shape[1], weights=[glove_pre_trained], trainable=False)
            x_rep=emb_layer(x)
        if "lstm_512" in model_type:
            x_rep=keras.layers.Dropout(0.5)(x_rep)
            lstm_layer2=keras.layers.Bidirectional(keras.layers.CuDNNLSTM(256) )
            enc=keras.layers.Activation('relu')(lstm_layer2(x_rep) )
        elif "cnn" in model_type:
            # Multi-width conv + global max pool (Kim-style text CNN).
            graph_in = keras.Input(shape=(data['train_set_idx_X'].shape[1],  glove_pre_trained.shape[1]))
            convs = []
            for fsz in [3, 4, 5]:
                conv = keras.layers.Conv1D(filters=128,
                                         kernel_size=fsz,
                                         padding='valid',
                                         activation='relu')(graph_in)
                pool = keras.layers.GlobalMaxPooling1D()(conv)
                convs.append(pool)
            out = keras.layers.Concatenate(axis=-1)(convs)
            graph = keras.models.Model(inputs=graph_in, outputs=out) #convolution layers

            x_rep=keras.layers.Dropout(0.5)(x_rep)
            x_conv = graph(x_rep)
            x_conv = keras.layers.Dropout(0.5)(x_conv)
            enc = keras.layers.Dense(256, activation="relu")(x_conv)

        x_rep=keras.layers.Dropout(0.5)(enc)
        # Per-class sigmoid (not softmax): DOC treats each class as an
        # independent one-vs-rest decision.
        output=keras.layers.Dense(data['train_set_Y'].max()+1, activation="sigmoid")(x_rep)
        model=keras.engine.Model(x, output)
        model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["acc"])
        history=model.fit(train_X, train_Y, 
                          validation_split=0.1, 
                          batch_size=batch_size, epochs=200, verbose=0,
                          callbacks=[
                             keras.callbacks.ModelCheckpoint(eval_fn, save_best_only=True) ]  )
        K.clear_session()
Example 76
Project: Meta-Open-World-Learning   Author: howardhsu   File: eval.py    MIT License 4 votes vote down vote up
def pred_evaluate(config):
    set_modes=config["set_modes"] #["test_25", "test_50", "test_75"]
    db=config["db"] #"amazon" 
    out_dir=config["out_dir"]
    model_type=config["model_type"]
    doc_eval="DOC" in model_type 
    if not doc_eval:
        top_n=config["top_n"] #10
        vote_n=config["vote_n"] #1 #typically 1, we disable manual vote; when top_n=1, we optionally vote

    scores={}
    for set_mode in set_modes:
        data=np.load("../"+db+"/data/"+set_mode+"_idx.npz")
        sess=tf.Session()
        K.set_session(sess)
        if doc_eval:
            model_fn=out_dir+"eval_"+set_mode+".h5"
            model=keras.models.load_model(model_fn)
        else:
            model_fn=out_dir+"eval.h5"
            model=keras.models.load_model(model_fn)
            model.get_layer("embedding_1").set_weights([np.vstack([data['train_rep'], np.zeros((90000, 512))]) ])
        
        thres=0.5
        if doc_eval:
            y_pred=doc_predict(model, data, model_type)
            gaus_thres=doc_thres(model, data, model_type)
        else:
            y_pred=l2ac_predict(model, data, top_n, vote_n)
            
        weighted_f1, _, _=evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="weighted")
        macro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="macro")
        micro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="micro")
        scores[set_mode]={'weighted_f1': weighted_f1, 'macro_f1': macro_f1, 'micro_f1': micro_f1}
        if doc_eval:
            weighted_f1, _, _=evaluate(data['test_Y'], y_pred, thres=gaus_thres, rejection=True, mode="weighted")
            macro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=gaus_thres, rejection=True, mode="macro")
            micro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=gaus_thres, rejection=True, mode="micro")
            scores[set_mode+"_gaus"]={'weighted_f1': weighted_f1, 'macro_f1': macro_f1, 'micro_f1': micro_f1}
            
        K.clear_session() 
    with open(out_dir+"eval.json", "w") as fw:
        json.dump(scores, fw) 
Example 77
Project: training_results_v0.6   Author: mlperf   File: test_keras.py    Apache License 2.0 4 votes vote down vote up
def test_load_model_custom_objects(self):
        """Save/load round-trip for a custom optimizer restored through the
        ``custom_objects`` mapping, where the factory re-wraps the optimizer
        in ``hvd.DistributedOptimizer`` at load time."""
        hvd.init()

        class TestOptimizer(keras.optimizers.RMSprop):
            def __init__(self, **kwargs):
                super(TestOptimizer, self).__init__(**kwargs)

        with self.test_session() as session:
            K.set_session(session)

            optimizer = hvd.DistributedOptimizer(TestOptimizer(lr=0.0001))

            net = keras.models.Sequential([
                keras.layers.Dense(2, input_shape=(3,)),
                keras.layers.RepeatVector(3),
                keras.layers.TimeDistributed(keras.layers.Dense(3)),
            ])
            net.compile(loss=keras.losses.MSE,
                        optimizer=optimizer,
                        metrics=[keras.metrics.categorical_accuracy],
                        sample_weight_mode='temporal')

            # One batch so optimizer slot variables exist before saving.
            net.train_on_batch(np.random.random((1, 3)),
                               np.random.random((1, 3, 3)))

            _, save_path = tempfile.mkstemp('.h5')
            net.save(save_path)

            # Factory both reconstructs the custom optimizer and re-wraps it.
            restore_map = {
                'TestOptimizer': lambda **kwargs: hvd.DistributedOptimizer(
                    TestOptimizer(**kwargs))
            }
            restored = hvd.load_model(save_path, custom_objects=restore_map)
            restored_opt = restored.optimizer
            os.remove(save_path)

            self.assertEqual(type(restored_opt).__module__, 'horovod.keras')
            self.assertEqual(type(restored_opt).__name__, 'TestOptimizer')
            self.assertEqual(K.get_value(optimizer.lr),
                             K.get_value(restored_opt.lr))
            self.assertEqual(len(optimizer.get_weights()),
                             len(restored_opt.get_weights()))
            for saved_w, loaded_w in zip(optimizer.get_weights(),
                                         restored_opt.get_weights()):
                self.assertListEqual(saved_w.tolist(), loaded_w.tolist())
Example 78
Project: training_results_v0.6   Author: mlperf   File: test_keras.py    Apache License 2.0 4 votes vote down vote up
def test_load_model_broadcast(self):
        """Rank 0 saves and reloads the model; other ranks rebuild it from
        scratch, then BroadcastGlobalVariablesCallback syncs rank 0's state
        to everyone. The fit_generator call is a smoke test for deadlocks."""
        hvd.init()

        def create_model():
            # LR scaled by world size, per the usual Horovod recipe.
            opt = keras.optimizers.SGD(lr=0.01 * hvd.size(), momentum=0.9)
            opt = hvd.DistributedOptimizer(opt)

            model = keras.models.Sequential()
            model.add(keras.layers.Dense(2, input_shape=(3,)))
            model.add(keras.layers.RepeatVector(3))
            model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
            model.compile(loss=keras.losses.MSE,
                          optimizer=opt,
                          metrics=[keras.metrics.categorical_accuracy],
                          sample_weight_mode='temporal')

            return model

        with self.test_session() as sess:
            K.set_session(sess)

            model = create_model()

            x = np.random.random((1, 3))
            y = np.random.random((1, 3, 3))
            model.train_on_batch(x, y)

            # Only rank 0 persists the trained model to disk.
            if hvd.rank() == 0:
                _, fname = tempfile.mkstemp('.h5')
                model.save(fname)

        K.clear_session()
        with self.test_session() as sess:
            K.set_session(sess)

            # Rank 0 reloads from file (optimizer state included); other
            # ranks build a fresh, untrained copy.
            if hvd.rank() == 0:
                model = hvd.load_model(fname)
                os.remove(fname)
            else:
                model = create_model()

            def generator():
                # x, y captured from the first session's batch above.
                while 1:
                    yield (x, y)

            # Loaded model carries optimizer slot weights; fresh one doesn't.
            if hvd.rank() == 0:
                self.assertEqual(len(model.optimizer.weights), 5)
            else:
                self.assertEqual(len(model.optimizer.weights), 0)

            # No assertions, we just need to verify that it doesn't hang
            # (epochs=0 with initial_epoch=1 appears intended to run no real
            # training, only the broadcast machinery — confirm if changing).
            callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
            model.fit_generator(generator(),
                                steps_per_epoch=10,
                                callbacks=callbacks,
                                epochs=0,
                                verbose=0,
                                workers=4,
                                initial_epoch=1)

            self.assertEqual(len(model.optimizer.weights), 5)
Example 79
Project: Keras-TextClassification — Author: yongzhuo — File: graph.py — License: MIT — 4 votes
def __init__(self, hyper_parameters):
    """Initialize the text-classification graph from a hyper-parameter dict.

    :param hyper_parameters: dict (json-like) with 'model' and 'embedding' sections
    """
    hp = hyper_parameters
    self.hyper_parameters = hp

    # --- embedding-level settings ---
    self.len_max = hp.get('len_max', 50)                # maximum text length
    self.embed_size = hp.get('embed_size', 300)         # embedding dimension
    self.trainable = hp.get('trainable', False)         # fine-tune embeddings (static/dynamic vectors, bert layers, random, ...)
    self.embedding_type = hp.get('embedding_type', 'word2vec')  # one of 'xlnet', 'bert', 'gpt-2', 'word2vec' or 'None'
    self.gpu_memory_fraction = hp.get('gpu_memory_fraction', None)  # GPU memory fraction; unset by default

    # --- model-level settings ---
    model_cfg = hp['model']
    self.label = model_cfg.get('label', 2)                    # number of classes
    self.batch_size = model_cfg.get('batch_size', 32)         # batch size
    self.filters = model_cfg.get('filters', [3, 4, 5])        # convolution kernel sizes
    self.filters_num = model_cfg.get('filters_num', 300)      # number of kernels
    self.channel_size = model_cfg.get('channel_size', 1)      # number of channels
    self.dropout = model_cfg.get('dropout', 0.5)              # dropout rate
    self.decay_step = model_cfg.get('decay_step', 100)        # learning-rate decay steps
    self.decay_rate = model_cfg.get('decay_rate', 0.9)        # learning-rate decay factor
    self.epochs = model_cfg.get('epochs', 20)                 # training epochs
    self.vocab_size = model_cfg.get('vocab_size', 20000)      # vocabulary size
    self.lr = model_cfg.get('lr', 1e-3)                       # learning rate
    self.l2 = model_cfg.get('l2', 1e-6)                       # l2 regularization coefficient
    self.activate_classify = model_cfg.get('activate_classify', 'softmax')  # classification activation, e.g. softmax
    self.loss = model_cfg.get('loss', 'categorical_crossentropy')  # loss: mse, categorical_crossentropy, sparse_categorical_crossentropy, binary_crossentropy, ...
    self.metrics = model_cfg.get('metrics', 'accuracy')  # acc, binary_accuracy, categorical_accuracy, sparse_categorical_accuracy, sparse_top_k_categorical_accuracy
    self.is_training = model_cfg.get('is_training', False)    # training flag; False when saving, for prediction
    self.model_path = model_cfg.get('model_path', "model_path")  # model save path
    self.path_hyper_parameters = model_cfg.get('path_hyper_parameters', "path_hyper_parameters")  # hyper-parameter save path
    self.path_fineture = model_cfg.get('path_fineture', "path_fineture")  # embedding-layer save path (static/dynamic vectors, fine-tuned bert, ...)
    self.patience = model_cfg.get('patience', 3)              # early-stopping patience; 2-3 is usually enough
    self.optimizer_name = model_cfg.get('optimizer_name', 'RAdam,Lookahead')  # optimizer name

    if self.gpu_memory_fraction:
        # Cap per-process GPU memory usage for keras/tensorflow.
        import tensorflow as tf
        gpu_config = tf.ConfigProto()
        gpu_config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
        # gpu_config.gpu_options.allow_growth = True
        K.set_session(tf.Session(config=gpu_config))

    self.create_model(hyper_parameters)
    if self.is_training:  # training phase only, kept separate from prediction
        self.create_compile()
Example 80
Project: lid_adversarial_subspace_detection   Author: xingjunm   File: craft_adv_examples.py    MIT License    4 votes
def main(args):
    """Craft adversarial examples for a dataset/attack combination.

    Loads the pre-trained model for ``args.dataset``, evaluates it on the
    clean test set, then crafts adversarial samples for ``args.attack``
    (or all attacks when 'all' is requested).

    :param args: argparse namespace with ``dataset``, ``attack`` and
        ``batch_size`` attributes.
    :raises AssertionError: on invalid dataset/attack/batch_size or a
        missing model file.
    """
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all', 'cw-lid'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma', 'cw-l2', 'all' or 'cw-lid' for attacking LID detector"
    model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
    assert os.path.isfile(model_file), \
        'model file not found... must first train model using train_model.py.'
    if args.dataset == 'svhn' and args.attack == 'cw-l2':
        assert args.batch_size == 16, \
            "svhn has 26032 test images, the batch_size for cw-l2 attack should be 16, " \
            "otherwise, there will be error at the last batch-- needs to be fixed."

    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))
    # Create TF session, set it as Keras backend
    sess = tf.Session()
    try:
        K.set_session(sess)
        if args.attack in ('cw-l2', 'cw-lid'):
            warnings.warn("Important: remove the softmax layer for cw attacks!")
            # use softmax=False to load without softmax layer
            model = get_model(args.dataset, softmax=False)
            model.compile(
                loss=cross_entropy,
                optimizer='adadelta',
                metrics=['accuracy']
            )
            model.load_weights(model_file)
        else:
            model = load_model(model_file)

        _, _, X_test, Y_test = get_data(args.dataset)
        _, acc = model.evaluate(X_test, Y_test, batch_size=args.batch_size,
                                verbose=0)
        print("Accuracy on the test set: %0.2f%%" % (100 * acc))

        if args.attack == 'cw-lid':  # white box attacking LID detector - an example
            # Restrict to a small subset to keep the cw-lid attack tractable.
            X_test = X_test[:1000]
            Y_test = Y_test[:1000]

        if args.attack == 'all':
            # Cycle through all attacks
            for attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2']:
                craft_one_type(sess, model, X_test, Y_test, args.dataset, attack,
                               args.batch_size)
        else:
            # Craft one specific attack type
            craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack,
                           args.batch_size)
        print('Adversarial samples crafted and saved to %s ' % PATH_DATA)
    finally:
        # Fix: previously the session leaked when any step above raised;
        # always release the TF session's resources.
        sess.close()