Python tensorflow.device() Examples
The following are 30 code examples of tensorflow.device(). Each example is taken from an open-source project; the project, source file, and license are noted above each snippet. You may also want to check out the other available functions and classes of the tensorflow module.
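Before the examples, a quick refresher: in TensorFlow 1.x graph mode, tf.device() is a context manager that pins every op created inside it to the named device. A minimal, self-contained sketch (assuming a plain TF 1.x install; '/cpu:0' works even on CPU-only hosts):

import tensorflow as tf  # assumes TensorFlow 1.x

with tf.Graph().as_default():
  # Every op created in this block is pinned to the first CPU.
  with tf.device('/cpu:0'):
    a = tf.constant([1.0, 2.0])
    b = tf.constant([3.0, 4.0])
    c = a + b

  print(c.device)  # /device:CPU:0
  with tf.Session() as sess:
    print(sess.run(c))  # [4. 6.]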
Example #1
Source File: show_and_tell_model.py From DOTA_models with Apache License 2.0
def build_seq_embeddings(self):
  """Builds the input sequence embeddings.

  Inputs:
    self.input_seqs

  Outputs:
    self.seq_embeddings
  """
  with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
    embedding_map = tf.get_variable(
        name="map",
        shape=[self.config.vocab_size, self.config.embedding_size],
        initializer=self.initializer)
    seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs)

  self.seq_embeddings = seq_embeddings
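The /cpu:0 pin above is a common idiom: large embedding tables are kept in host memory (where they might not fit on the GPU), and tf.nn.embedding_lookup is cheap enough to run there. A self-contained sketch of the same pattern, with illustrative vocabulary and embedding sizes:

import numpy as np
import tensorflow as tf  # TF 1.x

with tf.Graph().as_default():
  input_seqs = tf.constant(np.random.randint(0, 1000, size=(8, 20)))
  with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
    # The table lives on the CPU; the lookup runs where the table lives.
    embedding_map = tf.get_variable(
        "map", shape=[1000, 64],
        initializer=tf.random_uniform_initializer(-0.1, 0.1))
    seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(seq_embeddings).shape)  # (8, 20, 64)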
Example #2
Source File: tfutil.py From disentangling_conditional_gans with MIT License
def undo_loss_scaling(self, value):
    assert is_tf_expression(value)
    if not self.use_loss_scaling:
        return value
    return value * exp2(-self.get_loss_scaling_var(value.device))

#----------------------------------------------------------------------------
# Generic network abstraction.
#
# Acts as a convenience wrapper for a parameterized network construction
# function, providing several utility methods and convenient access to
# the inputs/outputs/weights.
#
# Network objects can be safely pickled and unpickled for long-term
# archival purposes. The pickling works reliably as long as the underlying
# network construction function is defined in a standalone Python module
# that has no side effects or application-specific imports.
Example #3
Source File: tfutil.py From disentangling_conditional_gans with MIT License
def finalize_autosummaries():
    global _autosummary_finalized
    if _autosummary_finalized:
        return
    _autosummary_finalized = True
    init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
    with tf.device(None), tf.control_dependencies(None):
        for name, vars in _autosummary_vars.items():
            id = name.replace('/', '_')
            with absolute_name_scope('Autosummary/' + id):
                sum = tf.add_n(vars)
                avg = sum[0] / sum[1]
                with tf.control_dependencies([avg]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        tf.summary.scalar(name, avg)

# Internal helper for creating autosummary accumulators.
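Note the tf.device(None) above: passing None clears any device annotation inherited from an enclosing context, so the summary ops are left unconstrained for the placer. A tiny sketch of that behavior (TF 1.x graph mode assumed):

import tensorflow as tf  # TF 1.x

with tf.Graph().as_default():
  with tf.device('/gpu:0'):
    a = tf.constant(1.0)    # pinned by the enclosing scope
    with tf.device(None):   # clears the enclosing device annotation
      b = tf.constant(2.0)  # unconstrained; the placer decides at run time
  print(a.device)  # /device:GPU:0
  print(b.device)  # '' (empty)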
Example #4
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testPS(self):
  deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

  self.assertDeviceEqual(deploy_config.clone_device(0),
                         '/job:worker/device:GPU:0')
  self.assertEqual(deploy_config.clone_scope(0), '')
  self.assertDeviceEqual(deploy_config.optimizer_device(),
                         '/job:worker/device:CPU:0')
  self.assertDeviceEqual(deploy_config.inputs_device(),
                         '/job:worker/device:CPU:0')

  with tf.device(deploy_config.variables_device()):
    a = tf.Variable(0)
    b = tf.Variable(0)
    c = tf.no_op()
    d = slim.variable('a', [],
                      caching_device=deploy_config.caching_device())

  self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
  self.assertDeviceEqual(a.device, a.value().device)
  self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
  self.assertDeviceEqual(b.device, b.value().device)
  self.assertDeviceEqual(c.device, '')
  self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
  self.assertDeviceEqual(d.value().device, '')
Example #5
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testVariablesPS(self):
  deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

  with tf.device(deploy_config.variables_device()):
    a = tf.Variable(0)
    b = tf.Variable(0)
    c = tf.no_op()
    d = slim.variable('a', [],
                      caching_device=deploy_config.caching_device())

  self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
  self.assertDeviceEqual(a.device, a.value().device)
  self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
  self.assertDeviceEqual(b.device, b.value().device)
  self.assertDeviceEqual(c.device, '')
  self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
  self.assertDeviceEqual(d.value().device, '')
Example #6
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 2)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'LogisticClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])
Example #7
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'BatchNormClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)
Example #8
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testCreateOnecloneWithPS(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                  num_ps_tasks=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(clones), 1)
    clone = clones[0]
    self.assertEqual(clone.outputs.op.name,
                     'BatchNormClassifier/fully_connected/Sigmoid')
    self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
    self.assertEqual(clone.scope, '')
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
      self.assertDeviceEqual(v.device, v.value().device)
Example #9
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 2)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #10
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example #11
Source File: model_deploy_test.py From DOTA_models with Apache License 2.0
def testNoSummariesOnGPUForEvals(self):
  with tf.Graph().as_default():
    deploy_config = model_deploy.DeploymentConfig(num_clones=2)

    # clone function creates a fully_connected layer with a regularizer loss.
    def ModelFn():
      inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
      reg = tf.contrib.layers.l2_regularizer(0.001)
      tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)

    # No optimizer here, it's an eval.
    model = model_deploy.deploy(deploy_config, ModelFn)
    # The model summary op should have a few summary inputs and all of them
    # should be on the CPU.
    self.assertTrue(model.summary_op.op.inputs)
    for inp in model.summary_op.op.inputs:
      self.assertEqual('/device:CPU:0', inp.device)
Example #12
Source File: model_deploy.py From DOTA_models with Apache License 2.0
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  """Compute losses and gradients for a single clone.

  Args:
    optimizer: A tf.Optimizer object.
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.
    **kwargs: Dict of kwarg to pass to compute_gradients().

  Returns:
    A tuple (clone_loss, clone_grads_and_vars).
      - clone_loss: A tensor for the total loss for the clone.  Can be None.
      - clone_grads_and_vars: List of (gradient, variable) for the clone.
        Can be empty.
  """
  sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
  clone_grad = None
  if sum_loss is not None:
    with tf.device(clone.device):
      clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
  return sum_loss, clone_grad
Example #13
Source File: word2vec_optimized.py From DOTA_models with Apache License 2.0
def main(_):
  """Train a word2vec model."""
  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device("/cpu:0"):
      model = Word2Vec(opts, session)
      model.read_analogies()  # Read analogy questions
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session,
                     os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy(b'france', b'paris', b'russia')
      # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
      _start_shell(locals())
Example #14
Source File: word2vec.py From DOTA_models with Apache License 2.0
def main(_):
  """Train a word2vec model."""
  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device("/cpu:0"):
      model = Word2Vec(opts, session)
      model.read_analogies()  # Read analogy questions
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session,
                     os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy(b'france', b'paris', b'russia')
      # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
      _start_shell(locals())
Example #15
Source File: memory.py From DOTA_models with Apache License 2.0
def get_hint_pool_idxs(self, normalized_query):
  """Get small set of idxs to compute nearest neighbor queries on.

  This is an expensive look-up on the whole memory that is used to
  avoid more expensive operations later on.

  Args:
    normalized_query: A Tensor of shape [None, key_dim].

  Returns:
    A Tensor of shape [None, choose_k] of indices in memory
    that are closest to the queries.
  """
  # look up in large memory, no gradients
  with tf.device(self.nn_device):
    similarities = tf.matmul(tf.stop_gradient(normalized_query),
                             self.mem_keys, transpose_b=True, name='nn_mmul')
  _, hint_pool_idxs = tf.nn.top_k(
      tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk')
  return hint_pool_idxs
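The pattern here, routing only the expensive similarity matmul to a dedicated device (self.nn_device) while blocking gradients through it, can be reproduced in isolation. A stripped-down sketch; the memory size, key dimension, k, and the device string are all assumptions:

import tensorflow as tf  # TF 1.x

with tf.Graph().as_default():
  mem_keys = tf.get_variable('mem_keys', shape=[4096, 128])
  query = tf.placeholder(tf.float32, shape=[None, 128])
  with tf.device('/cpu:0'):  # stand-in for self.nn_device
    similarities = tf.matmul(tf.stop_gradient(query), mem_keys,
                             transpose_b=True)
  # top_k runs outside the device scope, exactly as in the method above.
  _, hint_pool_idxs = tf.nn.top_k(tf.stop_gradient(similarities), k=256)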
Example #16
Source File: variables_test.py From DOTA_models with Apache License 2.0
def testVariableWithVariableDeviceChooser(self):
  with tf.Graph().as_default():
    device_fn = variables.VariableDeviceChooser(num_parameter_servers=2)
    with scopes.arg_scope([variables.variable], device=device_fn):
      a = variables.variable('a', [])
      b = variables.variable('b', [])
      c = variables.variable('c', [], device='cpu:12')
      d = variables.variable('d', [])
      with tf.device('cpu:99'):
        e_init = tf.constant(12)
      e = variables.variable('e', initializer=e_init)
    # The values below highlight how the VariableDeviceChooser puts initial
    # values on the same device as the variable job.
    self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
    self.assertDeviceEqual(a.initial_value.device, a.device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
    self.assertDeviceEqual(b.initial_value.device, b.device)
    self.assertDeviceEqual(c.device, '/cpu:12')
    self.assertDeviceEqual(c.initial_value.device, c.device)
    self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
    self.assertDeviceEqual(d.initial_value.device, d.device)
    self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
    self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
Example #17
Source File: variables_test.py From DOTA_models with Apache License 2.0
def testVariableGPUPlacement(self):
  with tf.Graph().as_default():
    device_fn = variables.VariableDeviceChooser(placement='gpu:0')
    with scopes.arg_scope([variables.variable], device=device_fn):
      a = variables.variable('a', [])
      b = variables.variable('b', [])
      c = variables.variable('c', [], device='cpu:12')
      d = variables.variable('d', [])
      with tf.device('cpu:99'):
        e_init = tf.constant(12)
      e = variables.variable('e', initializer=e_init)
    # The values below highlight how the VariableDeviceChooser puts initial
    # values on the same device as the variable job.
    self.assertDeviceEqual(a.device, '/gpu:0')
    self.assertDeviceEqual(a.initial_value.device, a.device)
    self.assertDeviceEqual(b.device, '/gpu:0')
    self.assertDeviceEqual(b.initial_value.device, b.device)
    self.assertDeviceEqual(c.device, '/cpu:12')
    self.assertDeviceEqual(c.initial_value.device, c.device)
    self.assertDeviceEqual(d.device, '/gpu:0')
    self.assertDeviceEqual(d.initial_value.device, d.device)
    self.assertDeviceEqual(e.device, '/gpu:0')
    self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
Example #18
Source File: variables_test.py From DOTA_models with Apache License 2.0
def testDeviceFn(self):
  class DevFn(object):

    def __init__(self):
      self.counter = -1

    def __call__(self, op):
      self.counter += 1
      return '/cpu:%d' % self.counter

  with tf.Graph().as_default():
    with scopes.arg_scope([variables.global_step], device=DevFn()):
      gs = variables.global_step()
      gs2 = variables.global_step()
      self.assertDeviceEqual(gs.device, '/cpu:0')
      self.assertEquals(gs, gs2)
      self.assertDeviceEqual(gs2.device, '/cpu:0')
Example #19
Source File: variables.py From DOTA_models with Apache License 2.0
def __init__(self,
             num_parameter_servers=0,
             ps_device='/job:ps',
             placement='CPU:0'):
  """Initialize VariableDeviceChooser.

  Args:
    num_parameter_servers: number of parameter servers.
    ps_device: string representing the parameter server device.
    placement: string representing the placement of the variable either
      CPU:0 or GPU:0. When using parameter servers forced to CPU:0.
  """
  self._num_ps = num_parameter_servers
  self._ps_device = ps_device
  self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
  self._next_task_id = 0
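tf.device() accepts a callable as well as a string: the function is invoked once per op and returns that op's device, which is the hook classes like VariableDeviceChooser (and DevFn in Example #18) rely on. A minimal round-robin chooser, written from scratch as an illustration rather than a copy of the library class:

import tensorflow as tf  # TF 1.x

class RoundRobinChooser(object):
  """Illustrative: spread variable ops across /job:ps tasks."""

  def __init__(self, num_tasks):
    self._num_tasks = num_tasks
    self._next_task = 0

  def __call__(self, op):
    if op.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
      return ''  # leave non-variable ops unconstrained
    device = '/job:ps/task:%d/cpu:0' % self._next_task
    self._next_task = (self._next_task + 1) % self._num_tasks
    return device

with tf.Graph().as_default():
  with tf.device(RoundRobinChooser(2)):
    v0 = tf.get_variable('v0', shape=[])
    v1 = tf.get_variable('v1', shape=[])
  print(v0.device)  # /job:ps/task:0/cpu:0
  print(v1.device)  # /job:ps/task:1/cpu:0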
Example #20
Source File: variables.py From DOTA_models with Apache License 2.0
def global_step(device=''):
  """Returns the global step variable.

  Args:
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    the tensor representing the global step variable.
  """
  global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
  if global_step_ref:
    return global_step_ref[0]
  else:
    collections = [
        VARIABLES_TO_RESTORE,
        tf.GraphKeys.GLOBAL_VARIABLES,
        tf.GraphKeys.GLOBAL_STEP,
    ]
    # Get the device for the variable.
    with tf.device(variable_device(device, 'global_step')):
      return tf.get_variable('global_step', shape=[], dtype=tf.int64,
                             initializer=tf.zeros_initializer(),
                             trainable=False, collections=collections)
Example #21
Source File: seq2seq_attention_model.py From DOTA_models with Apache License 2.0
def _add_train_op(self):
  """Sets self._train_op, op to run for training."""
  hps = self._hps

  self._lr_rate = tf.maximum(
      hps.min_lr,  # min_lr_rate.
      tf.train.exponential_decay(hps.lr, self.global_step, 30000, 0.98))

  tvars = tf.trainable_variables()
  with tf.device(self._get_gpu(self._num_gpus-1)):
    grads, global_norm = tf.clip_by_global_norm(
        tf.gradients(self._loss, tvars), hps.max_grad_norm)
  tf.summary.scalar('global_norm', global_norm)
  optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)
  tf.summary.scalar('learning rate', self._lr_rate)
  self._train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=self.global_step, name='train_step')
Example #22
Source File: tfutil.py From disentangling_conditional_gans with MIT License
def autosummary(name, value):
    id = name.replace('/', '_')
    if is_tf_expression(value):
        with tf.name_scope('summary_' + id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    else:  # python scalar or numpy array
        if name not in _autosummary_immediate:
            with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(tf.float32)
                update_op = _create_autosummary_var(name, update_value)
                _autosummary_immediate[name] = update_op, update_value
        update_op, update_value = _autosummary_immediate[name]
        run(update_op, {update_value: np.float32(value)})
        return value

# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
Example #23
Source File: neural_gpu.py From DOTA_models with Apache License 2.0
def conv_linear(args, kw, kh, nin, nout, rate, do_bias, bias_start, prefix):
  """Convolutional linear map."""
  if not isinstance(args, (list, tuple)):
    args = [args]
  with tf.variable_scope(prefix):
    with tf.device("/cpu:0"):
      k = tf.get_variable("CvK", [kw, kh, nin, nout])
    if len(args) == 1:
      arg = args[0]
    else:
      arg = tf.concat(axis=3, values=args)
    res = tf.nn.convolution(arg, k, dilation_rate=(rate, 1), padding="SAME")
    if not do_bias:
      return res
    with tf.device("/cpu:0"):
      bias_term = tf.get_variable(
          "CvB", [nout], initializer=tf.constant_initializer(bias_start))
    bias_term = tf.reshape(bias_term, [1, 1, 1, nout])
    return res + bias_term
Example #24
Source File: image_processing.py From DOTA_models with Apache License 2.0
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
  """Generate batches of distorted versions of ImageNet images.

  Use this function as the inputs for training a network.

  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not affect the label.

  Args:
    dataset: instance of Dataset class specifying the dataset.
    batch_size: integer, number of examples in batch
    num_preprocess_threads: integer, total number of preprocessing threads but
      None defaults to FLAGS.num_preprocess_threads.

  Returns:
    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
                                       FLAGS.image_size, 3].
    labels: 1-D integer Tensor of [batch_size].
  """
  if not batch_size:
    batch_size = FLAGS.batch_size

  # Force all input processing onto CPU in order to reserve the GPU for
  # the forward inference and back-propagation.
  with tf.device('/cpu:0'):
    images, labels = batch_inputs(
        dataset, batch_size, train=True,
        num_preprocess_threads=num_preprocess_threads,
        num_readers=FLAGS.num_readers)
  return images, labels
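The same keep-preprocessing-on-the-CPU idea carries over to tf.data pipelines. A hedged, self-contained sketch with synthetic images (the shapes and the flip augmentation are illustrative, not the batch_inputs implementation above):

import tensorflow as tf  # TF 1.x

with tf.Graph().as_default():
  # Reserve the GPU for the model; build the input pipeline on the CPU.
  with tf.device('/cpu:0'):
    dataset = tf.data.Dataset.from_tensor_slices(tf.zeros([100, 32, 32, 3]))
    dataset = dataset.map(tf.image.random_flip_left_right)
    dataset = dataset.batch(25)
    images = dataset.make_one_shot_iterator().get_next()

  with tf.Session() as sess:
    print(sess.run(images).shape)  # (25, 32, 32, 3)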
Example #25
Source File: cifar10_main.py From DOTA_models with Apache License 2.0
def _create_device_setter(is_cpu_ps, worker):
  """Create device setter object."""
  if is_cpu_ps:
    return tf.train.replica_device_setter(
        worker_device=worker, ps_device='/cpu:0', ps_tasks=1)
  else:
    gpus = ['/gpu:%d' % i for i in range(FLAGS.num_gpus)]
    return ParamServerDeviceSetter(worker, gpus)
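A setter returned by this helper is meant to be handed straight to tf.device() while each tower is built. A hedged usage sketch; build_tower and the two-tower count are assumptions, and it relies on _create_device_setter as defined above:

import tensorflow as tf  # TF 1.x

def build_tower(images):
  # Hypothetical tiny model body, for illustration only.
  return tf.layers.dense(images, 10)

with tf.Graph().as_default():
  images = tf.zeros([8, 32])
  for i in range(2):  # assumption: two towers
    worker = '/gpu:%d' % i
    device_setter = _create_device_setter(is_cpu_ps=True, worker=worker)
    # Variables land on /cpu:0; compute ops go to the tower's GPU.
    with tf.device(device_setter), tf.variable_scope('tower_%d' % i):
      logits = build_tower(images)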
Example #26
Source File: cifar10_main.py From DOTA_models with Apache License 2.0
def input_fn(subset, num_shards):
  """Create input graph for model.

  Args:
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.

  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  if subset == 'train':
    batch_size = FLAGS.train_batch_size
  elif subset == 'validate' or subset == 'eval':
    batch_size = FLAGS.eval_batch_size
  else:
    raise ValueError(
        'Subset must be one of \'train\', \'validate\' and \'eval\'')
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and FLAGS.use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(FLAGS.data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than
    # batch_size examples. This is because it does so only when repeating
    # for a limited number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards
Example #27
Source File: cifar10.py From DOTA_models with Apache License 2.0
def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  with tf.device('/cpu:0'):
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
  return var
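Callers typically pair this helper with a GPU-pinned compute block, so the weights stay in host memory while the math runs on the accelerator. A hedged sketch of such a caller; the kernel shape is illustrative, and it assumes _variable_on_cpu and FLAGS.use_fp16 from above:

import tensorflow as tf  # TF 1.x

with tf.Graph().as_default():
  images = tf.zeros([16, 24, 24, 3])
  # The kernel variable is created on /cpu:0 by the helper...
  kernel = _variable_on_cpu('weights', [5, 5, 3, 64],
                            tf.truncated_normal_initializer(stddev=5e-2))
  # ...while the convolution that reads it can be placed on a GPU.
  with tf.device('/gpu:0'):
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')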
Example #28
Source File: train.py From DOTA_models with Apache License 2.0
def main(_):
  prepare_training_dir()

  dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
  model = common_flags.create_model(dataset.num_char_classes,
                                    dataset.max_sequence_length,
                                    dataset.num_of_views, dataset.null_code)
  hparams = get_training_hparams()

  # If ps_tasks is zero, the local device is used. When using multiple
  # (non-local) replicas, the ReplicaDeviceSetter distributes the variables
  # across the different devices.
  device_setter = tf.train.replica_device_setter(
      FLAGS.ps_tasks, merge_devices=True)
  with tf.device(device_setter):
    data = data_provider.get_data(
        dataset,
        FLAGS.batch_size,
        augment=hparams.use_augment_input,
        central_crop_size=common_flags.get_crop_size())
    endpoints = model.create_base(data.images, data.labels_one_hot)
    total_loss = model.create_loss(data, endpoints)
    model.create_summaries(data, endpoints, dataset.charset, is_training=True)
    init_fn = model.create_init_fn_to_restore(FLAGS.checkpoint,
                                              FLAGS.checkpoint_inception)
    if FLAGS.show_graph_stats:
      logging.info('Total number of weights in the graph: %s',
                   calculate_graph_metrics())
    train(total_loss, init_fn, hparams)
Example #29
Source File: tfutil.py From disentangling_conditional_gans with MIT License
def save_summaries(filewriter, global_step=None):
    global _summary_merge_op
    if _summary_merge_op is None:
        finalize_autosummaries()
        with tf.device(None), tf.control_dependencies(None):
            _summary_merge_op = tf.summary.merge_all()
    filewriter.add_summary(_summary_merge_op.eval(), global_step)

#----------------------------------------------------------------------------
# Utilities for importing modules and objects by name.
Example #30
Source File: cifar10_main.py From DOTA_models with Apache License 2.0
def __call__(self, op):
  if op.device:
    return op.device
  if op.type not in ['Variable', 'VariableV2', 'VarHandleOp']:
    return self.worker_device

  device_index, _ = min(enumerate(self.ps_sizes),
                        key=operator.itemgetter(1))
  device_name = self.ps_devices[device_index]
  var_size = op.outputs[0].get_shape().num_elements()
  self.ps_sizes[device_index] += var_size
  return device_name
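Only __call__ appears in this listing; the constructor state it relies on (worker_device, ps_devices, ps_sizes) is not shown. A plausible reconstruction of that state, plus usage, purely as a sketch: variables go to whichever parameter-server device has received the fewest elements so far, everything else to the worker.

import operator
import tensorflow as tf  # TF 1.x

class GreedyPSSetter(object):
  """Sketch of the surrounding class (reconstruction, not the original)."""

  def __init__(self, worker_device, ps_devices):
    self.worker_device = worker_device
    self.ps_devices = ps_devices
    self.ps_sizes = [0] * len(ps_devices)

  def __call__(self, op):  # same logic as the __call__ above
    if op.device:
      return op.device
    if op.type not in ['Variable', 'VariableV2', 'VarHandleOp']:
      return self.worker_device
    # Pick the least-loaded parameter-server device and charge it
    # with this variable's element count.
    device_index, _ = min(enumerate(self.ps_sizes),
                          key=operator.itemgetter(1))
    device_name = self.ps_devices[device_index]
    var_size = op.outputs[0].get_shape().num_elements()
    self.ps_sizes[device_index] += var_size
    return device_name

with tf.Graph().as_default():
  setter = GreedyPSSetter('/gpu:0', ['/gpu:0', '/gpu:1'])
  with tf.device(setter):
    big = tf.get_variable('big', shape=[1024])    # -> /gpu:0
    small = tf.get_variable('small', shape=[16])  # -> /gpu:1 (less loaded)
  print(big.device, small.device)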