Python tensorflow.ConfigProto() Examples
The following are 30 code examples of tensorflow.ConfigProto(), collected from open-source projects. ConfigProto is part of the TensorFlow 1.x API; under TensorFlow 2.x the same class is available as tf.compat.v1.ConfigProto. Each example notes its source file, originating project, and license, so you can follow up in the original repository. You may also want to check out the other available functions and classes of the tensorflow module.
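
Most of the examples below follow the same basic pattern: construct a tf.ConfigProto, set a few fields (most commonly allow_soft_placement and gpu_options.allow_growth), and pass it to tf.Session. As a minimal, illustrative sketch (TensorFlow 1.x API assumed; not taken from any of the projects below):

import tensorflow as tf

config = tf.ConfigProto(
    allow_soft_placement=True,     # fall back to an available device if an op has no kernel for the requested one
    log_device_placement=False)    # set True to log which device each op is assigned to
config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of reserving it all up front

sess = tf.Session(config=config)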

Example #1
Source File: agents.py From soccer-matlab with BSD 2-Clause "Simplified" License | 7 votes |
def __init__(self):
    self.session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
    self.actor = networks.Actor_MLP(scope="actor1", units=[settings.S_DIM, 100, settings.A_DIM],
                                    activations=[None, 'relu', 'tanh'], trainable=True)
    self.old_actor = networks.Actor_MLP(scope="actor0", units=[settings.S_DIM, 100, settings.A_DIM],
                                        activations=[None, 'relu', 'tanh'], trainable=False)
    self.critic = networks.Critic_MLP(scope="critic1", units=[settings.S_DIM, 100, 1],
                                      activations=[None, 'relu', None], trainable=True)
    self.state_tf = tf.placeholder(dtype=tf.float32, shape=[None, settings.S_DIM])
    self.action_tf = tf.placeholder(dtype=tf.float32, shape=[None, settings.A_DIM])
    self.return_tf = tf.placeholder(dtype=tf.float32, shape=[None, 1])
    self.adv_tf = tf.placeholder(dtype=tf.float32, shape=[None, 1])

    # global steps to keep track of training
    self.actor_step = tf.get_variable('actor_global_step', [],
                                      initializer=tf.constant_initializer(0), trainable=False)
    self.critic_step = tf.get_variable('critic_global_step', [],
                                       initializer=tf.constant_initializer(0), trainable=False)

    # build computation graphs
    self.actor.build_graph(self.state_tf, self.actor_step)
    self.old_actor.build_graph(self.state_tf, 0)
    self.critic.build_graph(self.state_tf, self.critic_step)
    self.build_graph()
Example #2
Source File: tfutil.py From disentangling_conditional_gans with MIT License | 6 votes |
def create_session(config_dict=dict(), force_as_default=False):
    config = tf.ConfigProto()
    for key, value in config_dict.items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session

#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
#   tf.variables_initializer(tf.report_uninitialized_variables()).run()
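
The helper above resolves dotted keys onto nested ConfigProto fields via getattr/setattr. A hypothetical call (the field names are standard ConfigProto fields, but this exact invocation is not from the project) might look like:

# Hypothetical usage of create_session() defined above.
sess = create_session(
    config_dict={
        'gpu_options.allow_growth': True,  # resolved as config.gpu_options.allow_growth = True
        'log_device_placement': False,     # top-level field, set directly on the proto
    },
    force_as_default=True)                 # enters the session as the process-wide default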
Example #3
Source File: model_based_policy.py From cs294-112_hws with MIT License | 6 votes |
def _setup_graph(self):
    """
    Sets up the tensorflow computation graph for training, prediction, and action selection

    The variables returned will be set as class attributes (see __init__)
    """
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)

    ### PROBLEM 1
    ### YOUR CODE HERE
    state_ph, action_ph, next_state_ph = self._setup_placeholders()
    next_state_pred = self._dynamics_func(state_ph, action_ph, False)
    loss, optimizer = self._setup_training(state_ph, next_state_ph, next_state_pred)

    ### PROBLEM 2
    ### YOUR CODE HERE
    best_action = self._setup_action_selection(state_ph)

    sess.run(tf.global_variables_initializer())

    return sess, state_ph, action_ph, next_state_ph, \
           next_state_pred, loss, optimizer, best_action
Example #4
Source File: trainer_lib.py From fine-lm with MIT License | 6 votes |
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
    """The TensorFlow Session config to use."""
    if use_tpu:
        graph_options = tf.GraphOptions()
    else:
        if enable_graph_rewriter:
            rewrite_options = rewriter_config_pb2.RewriterConfig()
            rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
            graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
        else:
            graph_options = tf.GraphOptions(
                optimizer_options=tf.OptimizerOptions(
                    opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)

    config = tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_options,
        gpu_options=gpu_options,
        log_device_placement=log_device_placement,
        inter_op_parallelism_threads=inter_op_parallelism_threads,
        intra_op_parallelism_threads=intra_op_parallelism_threads)
    return config
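
A rough usage sketch for the factory above (the argument values here are arbitrary placeholders, not project defaults):

# Hypothetical call; caps per-process GPU memory at 80% and limits threading.
config = create_session_config(gpu_mem_fraction=0.8,
                               inter_op_parallelism_threads=4,
                               intra_op_parallelism_threads=4)
sess = tf.Session(config=config)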
Example #5
Source File: run_mujoco.py From lirpg with MIT License | 6 votes |
def train(env_id, num_timesteps, seed):
    env = make_mujoco_env(env_id, seed)
    with tf.Session(config=tf.ConfigProto()):
        ob_dim = env.observation_space.shape[0]
        ac_dim = env.action_space.shape[0]
        with tf.variable_scope("vf"):
            vf = NeuralNetValueFunction(ob_dim, ac_dim)
        with tf.variable_scope("pi"):
            policy = GaussianMlpPolicy(ob_dim, ac_dim)
        learn(env, policy=policy, vf=vf, gamma=0.99, lam=0.97, timesteps_per_batch=2500,
              desired_kl=0.002, num_timesteps=num_timesteps, animate=False)
        env.close()
Example #6
Source File: run_atari.py From lirpg with MIT License | 6 votes |
def train(env_id, num_timesteps, seed, policy):
    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin':
        ncpu //= 2
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=ncpu,
                            inter_op_parallelism_threads=ncpu)
    config.gpu_options.allow_growth = True  # pylint: disable=E1101
    tf.Session(config=config).__enter__()

    env = VecFrameStack(make_atari_env(env_id, 8, seed), 4)
    policy = {'cnn': CnnPolicy, 'lstm': LstmPolicy, 'lnlstm': LnLstmPolicy}[policy]
    ppo2.learn(policy=policy, env=env, nsteps=128, nminibatches=4,
               lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
               ent_coef=.01,
               lr=lambda f: f * 2.5e-4,
               cliprange=lambda f: f * 0.1,
               total_timesteps=int(num_timesteps * 1.1))
Example #7
Source File: run_mujoco.py From HardRLWithYoutube with MIT License | 6 votes |
def train(env_id, num_timesteps, seed):
    env = make_mujoco_env(env_id, seed)
    with tf.Session(config=tf.ConfigProto()):
        ob_dim = env.observation_space.shape[0]
        ac_dim = env.action_space.shape[0]
        with tf.variable_scope("vf"):
            vf = NeuralNetValueFunction(ob_dim, ac_dim)
        with tf.variable_scope("pi"):
            policy = GaussianMlpPolicy(ob_dim, ac_dim)
        learn(env, policy=policy, vf=vf, gamma=0.99, lam=0.97, timesteps_per_batch=2500,
              desired_kl=0.002, num_timesteps=num_timesteps, animate=False)
        env.close()
Example #8
Source File: nn_model.py From mercari-price-suggestion with MIT License | 6 votes |
def __init__(self, train_df, word_count, batch_size, epochs):
    tf.set_random_seed(4)
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=8)
    backend.set_session(tf.Session(graph=tf.get_default_graph(), config=session_conf))

    self.batch_size = batch_size
    self.epochs = epochs

    self.max_name_seq = 10
    self.max_item_desc_seq = 75
    self.max_text = word_count + 1
    self.max_brand = np.max(train_df.brand_name.max()) + 1
    self.max_condition = np.max(train_df.item_condition_id.max()) + 1
    self.max_subcat0 = np.max(train_df.subcat_0.max()) + 1
    self.max_subcat1 = np.max(train_df.subcat_1.max()) + 1
    self.max_subcat2 = np.max(train_df.subcat_2.max()) + 1
Example #9
Source File: ensemble_gpu.py From kaggle-carvana-2017 with MIT License | 6 votes |
def predictor(q, gpu, pq):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    with sess.as_default():
        model = create_model(gpu)
        while True:
            batch_fnames, x_batch = q.get()
            if x_batch is None:
                break

            preds = model.predict_on_batch(x_batch)

            for i, pred in enumerate(preds):
                filename = batch_fnames[i]
                pq.put((os.path.join(ensembling_dir, filename[:-4] + ".png"), pred))
Example #10
Source File: predict_multithreaded.py From kaggle-carvana-2017 with MIT License | 6 votes |
def predictor(q, gpu):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    with sess.as_default():
        model = create_model(gpu)
        while True:
            batch_fnames, x_batch = q.get()
            if x_batch is None:
                break

            preds = model.predict_on_batch(x_batch)
            if args.pred_tta:
                preds = undo_tta(preds, args.pred_tta)

            for i, pred in enumerate(preds):
                filename = batch_fnames[i]
                prediction = pred[:, 1:-1, :]
                array_to_img(prediction * 255).save(
                    os.path.join(output_dir, filename.split('/')[-1][:-4] + ".png"))
Example #11
Source File: chptToBin.py From iAI with MIT License | 6 votes |
def chpt_to_dict_arrays_simple(file_name):
    """
    Convert a checkpoint into a dictionary of numpy arrays
    for later use in TensorRT NMT sample.
    """
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    saver = tf.train.import_meta_graph(file_name)
    dir_name = os.path.dirname(os.path.abspath(file_name))
    saver.restore(sess, tf.train.latest_checkpoint(dir_name))

    params = {}
    print('\nFound the following trainable variables:')
    with sess.as_default():
        variables = tf.trainable_variables()
        for v in variables:
            params[v.name] = v.eval(session=sess)
            print("{0} {1}".format(v.name, params[v.name].shape))

    # use default value
    params["forget_bias"] = 1.0
    return params
Example #12
Source File: chptToBin.py From iAI with MIT License | 6 votes |
def chpt_to_dict_arrays_simple(file_name):
    """
    Convert a checkpoint into a dictionary of numpy arrays
    for later use in TensorRT NMT sample.
    """
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    saver = tf.train.import_meta_graph(file_name)
    dir_name = os.path.dirname(os.path.abspath(file_name))
    saver.restore(sess, tf.train.latest_checkpoint(dir_name))

    params = {}
    print('\nFound the following trainable variables:')
    with sess.as_default():
        variables = tf.trainable_variables()
        for v in variables:
            params[v.name] = v.eval(session=sess)
            print("{0} {1}".format(v.name, params[v.name].shape))

    # use default value
    params["forget_bias"] = 1.0
    return params
Example #13
Source File: chptToBin.py From iAI with MIT License | 6 votes |
def chpt_to_dict_arrays_simple(file_name):
    """
    Convert a checkpoint into a dictionary of numpy arrays
    for later use in TensorRT NMT sample.
    """
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    saver = tf.train.import_meta_graph(file_name)
    dir_name = os.path.dirname(os.path.abspath(file_name))
    saver.restore(sess, tf.train.latest_checkpoint(dir_name))

    params = {}
    print('\nFound the following trainable variables:')
    with sess.as_default():
        variables = tf.trainable_variables()
        for v in variables:
            params[v.name] = v.eval(session=sess)
            print("{0} {1}".format(v.name, params[v.name].shape))

    # use default value
    params["forget_bias"] = 1.0
    return params
Example #14
Source File: mdbt.py From ConvLab with MIT License | 6 votes |
def test_update():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    _config = tf.ConfigProto()
    _config.gpu_options.allow_growth = True
    _config.allow_soft_placement = True
    start_time = time.time()
    mdbt = MDBTTracker()
    print('\tMDBT: model build time: {:.2f} seconds'.format(time.time() - start_time))
    saver = tf.train.Saver()
    mdbt.restore_model(mdbt.sess, saver)
    # demo state history
    mdbt.state['history'] = [
        ['null', 'I\'m trying to find an expensive restaurant in the centre part of town.'],
        ['The Cambridge Chop House is an good expensive restaurant in the centre of town. Would you like me to book it for you?',
         'Yes, a table for 1 at 16:15 on sunday. I need the reference number.']]
    new_state = mdbt.update(None, 'hi, this is not good')
    print(json.dumps(new_state, indent=4))
    print('all time: {:.2f} seconds'.format(time.time() - start_time))
Example #15
Source File: reaction.py From armchair-expert with MIT License | 6 votes |
def __init__(self, path: str = None, use_gpu=False):
    import tensorflow as tf
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.backend import set_session

    self.model = Sequential()
    self.model.add(Dense(AOLReactionFeatureAnalyzer.NUM_FEATURES, activation='relu',
                         input_dim=AOLReactionFeatureAnalyzer.NUM_FEATURES))
    self.model.add(Dense(AOLReactionFeatureAnalyzer.NUM_FEATURES - 2, activation='relu'))
    self.model.add(Dense(1, activation='sigmoid'))
    self.model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

    if use_gpu:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
Example #16
Source File: structure.py From armchair-expert with MIT License | 6 votes |
def __init__(self, use_gpu: bool = False):
    import tensorflow as tf
    from keras.models import Sequential
    from keras.layers import Dense, Embedding
    from keras.layers import LSTM
    from keras.backend import set_session

    latent_dim = StructureModel.SEQUENCE_LENGTH * 8

    model = Sequential()
    model.add(
        Embedding(StructureFeatureAnalyzer.NUM_FEATURES, StructureFeatureAnalyzer.NUM_FEATURES,
                  input_length=StructureModel.SEQUENCE_LENGTH))
    model.add(LSTM(latent_dim, dropout=0.2, return_sequences=False))
    model.add(Dense(StructureFeatureAnalyzer.NUM_FEATURES, activation='softmax'))
    model.summary()
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    self.model = model

    if use_gpu:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
Example #17
Source File: interface.py From Generative-adversarial-Nets-in-NLP with Apache License 2.0 | 6 votes |
def train(self):
    # Construct model
    model = Transformer()
    print("Graph loaded")

    init = tf.global_variables_initializer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Start training
    sv = tf.train.Supervisor(logdir=pm.logdir, save_model_secs=0, init_op=init)
    saver = sv.saver
    with sv.managed_session(config=config) as sess:
        for epoch in range(1, pm.num_epochs + 1):
            if sv.should_stop():
                break
            for _ in tqdm(range(model.num_batch), total=model.num_batch, ncols=70, leave=False, unit='b'):
                sess.run(model.optimizer)

            gs = sess.run(model.global_step)
            saver.save(sess, pm.logdir + '/model_epoch_{}_global_step_{}'.format(epoch, gs))

    print("MSG : Done for training!")
Example #18
Source File: audio_feature_extractor.py From Tensorflow-Audio-Classification with Apache License 2.0 | 6 votes |
def __init__(self, checkpoint, pca_params, input_tensor_name, output_tensor_name):
    """Create a new Graph and a new Session for every VGGishExtractor object."""
    super(VGGishExtractor, self).__init__()

    self.graph = tf.Graph()
    with self.graph.as_default():
        vggish_slim.define_vggish_slim(training=False)

    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
    self.sess = tf.Session(graph=self.graph, config=sess_config)
    vggish_slim.load_defined_vggish_slim_checkpoint(self.sess, checkpoint)

    # use the self.sess to init others
    self.input_tensor = self.graph.get_tensor_by_name(input_tensor_name)
    self.output_tensor = self.graph.get_tensor_by_name(output_tensor_name)

    # postprocessor
    self.postprocess = vggish_postprocess.Postprocessor(pca_params)
Example #19
Source File: server.py From convseg with MIT License | 5 votes |
def make_app(model_dir):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    config.allow_soft_placement = True
    config.log_device_placement = True
    sess = tf.Session(config=config)
    tagger = Tagger(sess=sess, model_dir=model_dir, scope=TASK.scope, batch_size=200)

    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/%s" % TASK.scope, TaskHandler, {'tagger': tagger})
    ])
Example #20
Source File: build.py From Traffic_sign_detection_YOLO with MIT License | 5 votes |
def setup_meta_ops(self):
    cfg = dict({
        'allow_soft_placement': False,
        'log_device_placement': False
    })

    utility = min(self.FLAGS.gpu, 1.)
    if utility > 0.0:
        self.say('GPU mode with {} usage'.format(utility))
        cfg['gpu_options'] = tf.GPUOptions(per_process_gpu_memory_fraction=utility)
        cfg['allow_soft_placement'] = True
    else:
        self.say('Running entirely on CPU')
        cfg['device_count'] = {'GPU': 0}

    if self.FLAGS.train:
        self.build_train_op()

    if self.FLAGS.summary:
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.FLAGS.summary + 'train')

    self.sess = tf.Session(config=tf.ConfigProto(**cfg))
    self.sess.run(tf.global_variables_initializer())

    if not self.ntrain:
        return
    self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.FLAGS.keep)
    if self.FLAGS.load != 0:
        self.load_from_ckpt()

    if self.FLAGS.summary:
        self.writer.add_graph(self.sess.graph)
Example #21
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0 | 5 votes |
def __init__(self):
    self.graph = tf.Graph()

    with self.graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            self.pnet, self.rnet, self.onet = FaceDet.create_mtcnn(sess, None)
Example #22
Source File: trainer.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _init_session(self):
    # Set TF random seed to improve reproducibility
    self.rng = np.random.RandomState([2017, 8, 30])
    tf.set_random_seed(1234)

    # Create TF session
    self.sess = tf.Session(
        config=tf.ConfigProto(allow_soft_placement=True))

    # Object used to keep track of (and return) key accuracies
    if self.hparams.save:
        self.writer = tf.summary.FileWriter(self.hparams.save_dir, flush_secs=10)
    else:
        self.writer = None
Example #23
Source File: DeepFM.py From tensorflow-DeepFM with MIT License | 5 votes |
def _init_session(self):
    config = tf.ConfigProto(device_count={"gpu": 0})
    config.gpu_options.allow_growth = True
    return tf.Session(config=config)
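
One caveat on the snippet above (my reading, not a statement from the project): device_count keys are TensorFlow device-type strings and are conventionally spelled upper-case, as in Example #30, so a lower-case "gpu" key may not actually hide the GPU. A sketch of the usual spelling:

# Sketch: the conventional way to disable GPU placement via device_count.
config = tf.ConfigProto(device_count={'GPU': 0})  # upper-case 'GPU', matching Example #30
sess = tf.Session(config=config)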
Example #24
Source File: test.py From Sound-Recognition-Tutorial with Apache License 2.0 | 5 votes |
def use_gpu():
    """Configuration for GPU"""
    from keras.backend.tensorflow_backend import set_session
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.gpu_options.allow_growth = True
    set_session(tf.InteractiveSession(config=config))
Example #25
Source File: train.py From Sound-Recognition-Tutorial with Apache License 2.0 | 5 votes |
def use_gpu():
    """Configuration for GPU"""
    from keras.backend.tensorflow_backend import set_session
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)  # use the first GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5  # cap GPU memory usage at 50%
    config.gpu_options.allow_growth = True  # allow memory allocation to grow as needed
    set_session(tf.InteractiveSession(config=config))
Example #26
Source File: seq2seq_attention_decode.py From DOTA_models with Apache License 2.0 | 5 votes |
def DecodeLoop(self):
    """Decoding loop for long running process."""
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    step = 0
    while step < FLAGS.max_decode_steps:
        time.sleep(DECODE_LOOP_DELAY_SECS)
        if not self._Decode(self._saver, sess):
            continue
        step += 1
Example #27
Source File: seq2seq_attention.py From DOTA_models with Apache License 2.0 | 5 votes |
def _Train(model, data_batcher):
    """Runs model training."""
    with tf.device('/cpu:0'):
        model.build_graph()
        saver = tf.train.Saver()
        # Train dir is different from log_root to avoid summary directory
        # conflict with Supervisor.
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir)
        sv = tf.train.Supervisor(logdir=FLAGS.log_root,
                                 is_chief=True,
                                 saver=saver,
                                 summary_op=None,
                                 save_summaries_secs=60,
                                 save_model_secs=FLAGS.checkpoint_secs,
                                 global_step=model.global_step)
        sess = sv.prepare_or_wait_for_session(config=tf.ConfigProto(
            allow_soft_placement=True))
        running_avg_loss = 0
        step = 0
        while not sv.should_stop() and step < FLAGS.max_run_steps:
            (article_batch, abstract_batch, targets, article_lens, abstract_lens,
             loss_weights, _, _) = data_batcher.NextBatch()
            (_, summaries, loss, train_step) = model.run_train_step(
                sess, article_batch, abstract_batch, targets, article_lens,
                abstract_lens, loss_weights)

            summary_writer.add_summary(summaries, train_step)
            running_avg_loss = _RunningAvgLoss(
                running_avg_loss, loss, summary_writer, train_step)
            step += 1
            if step % 100 == 0:
                summary_writer.flush()
        sv.Stop()
        return running_avg_loss
Example #28
Source File: seq2seq_attention.py From DOTA_models with Apache License 2.0 | 5 votes |
def _Eval(model, data_batcher, vocab=None):
    """Runs model eval."""
    model.build_graph()
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    running_avg_loss = 0
    step = 0
    while True:
        time.sleep(FLAGS.eval_interval_secs)
        try:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
        except tf.errors.OutOfRangeError as e:
            tf.logging.error('Cannot restore checkpoint: %s', e)
            continue

        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            tf.logging.info('No model to eval yet at %s', FLAGS.train_dir)
            continue

        tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)

        (article_batch, abstract_batch, targets, article_lens, abstract_lens,
         loss_weights, _, _) = data_batcher.NextBatch()
        (summaries, loss, train_step) = model.run_eval_step(
            sess, article_batch, abstract_batch, targets, article_lens,
            abstract_lens, loss_weights)
        tf.logging.info(
            'article: %s',
            ' '.join(data.Ids2Words(article_batch[0][:].tolist(), vocab)))
        tf.logging.info(
            'abstract: %s',
            ' '.join(data.Ids2Words(abstract_batch[0][:].tolist(), vocab)))

        summary_writer.add_summary(summaries, train_step)
        running_avg_loss = _RunningAvgLoss(
            running_avg_loss, loss, summary_writer, train_step)
        if step % 100 == 0:
            summary_writer.flush()
Example #29
Source File: alexnet_benchmark.py From DOTA_models with Apache License 2.0 | 5 votes |
def run_benchmark():
    """Run the benchmark on AlexNet."""
    with tf.Graph().as_default():
        # Generate some dummy images.
        image_size = 224
        # Note that our padding definition is slightly different from cuda-convnet.
        # In order to force the model to start with the same activations sizes,
        # we add 3 to the image_size and employ VALID padding above.
        images = tf.Variable(tf.random_normal([FLAGS.batch_size,
                                               image_size,
                                               image_size, 3],
                                              dtype=tf.float32,
                                              stddev=1e-1))

        # Build a Graph that computes the logits predictions from the
        # inference model.
        pool5, parameters = inference(images)

        # Build an initialization operation.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        config = tf.ConfigProto()
        config.gpu_options.allocator_type = 'BFC'
        sess = tf.Session(config=config)
        sess.run(init)

        # Run the forward benchmark.
        time_tensorflow_run(sess, pool5, "Forward")

        # Add a simple objective so we can calculate the backward pass.
        objective = tf.nn.l2_loss(pool5)
        # Compute the gradient with respect to all the parameters.
        grad = tf.gradients(objective, parameters)
        # Run the backward benchmark.
        time_tensorflow_run(sess, grad, "Forward-backward")
Example #30
Source File: eval.py From DOTA_models with Apache License 2.0 | 5 votes |
def main(_):
    if not tf.gfile.Exists(FLAGS.eval_log_dir):
        tf.gfile.MakeDirs(FLAGS.eval_log_dir)

    dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
    model = common_flags.create_model(dataset.num_char_classes,
                                      dataset.max_sequence_length,
                                      dataset.num_of_views, dataset.null_code)
    data = data_provider.get_data(
        dataset,
        FLAGS.batch_size,
        augment=False,
        central_crop_size=common_flags.get_crop_size())
    endpoints = model.create_base(data.images, labels_one_hot=None)
    model.create_loss(data, endpoints)
    eval_ops = model.create_summaries(
        data, endpoints, dataset.charset, is_training=False)
    slim.get_or_create_global_step()
    session_config = tf.ConfigProto(device_count={"GPU": 0})
    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=FLAGS.train_log_dir,
        logdir=FLAGS.eval_log_dir,
        eval_op=eval_ops,
        num_evals=FLAGS.num_batches,
        eval_interval_secs=FLAGS.eval_interval_secs,
        max_number_of_evaluations=FLAGS.number_of_steps,
        session_config=session_config)