Python tensorflow.Variable() Examples

The following are 30 code examples of tensorflow.Variable(), collected from open-source projects. Each example is preceded by its source file, originating project, and license. You may also want to check out the other available functions and classes of the tensorflow module.
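All of the examples use TF1-style graph execution. As a quick orientation, here is a minimal sketch of the basic pattern, assuming nothing beyond stock TensorFlow 1.x: a tf.Variable is constructed from an initial value and must be explicitly initialized before it can be read.

import tensorflow as tf

# A variable holds mutable state in the graph; it must be initialized
# before use (TF1-style graph mode, as in all examples below).
w = tf.Variable(tf.zeros([2, 3]), name='w')
b = tf.Variable(0.5, trainable=False, name='b')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # 2x3 matrix of zeros
    print(sess.run(b))  # 0.5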
Example #1
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '') 
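model_deploy.DeploymentConfig is project code from DOTA_models and is not shown here. As a hedged sketch of the same round-robin variable placement using only stock TF1 (an illustrative substitute, not the test's actual mechanism):

import tensorflow as tf

# replica_device_setter places each variable round-robin across the
# parameter-server tasks; ordinary ops go to the worker device.
with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
    a = tf.Variable(0)  # -> /job:ps/task:0
    b = tf.Variable(0)  # -> /job:ps/task:1
    c = tf.no_op()      # -> worker device

print(a.device, b.device, c.device)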
Example #2
Source File: accountant.py    From DOTA_models with Apache License 2.0
def __init__(self, total_examples, moment_orders=32):
    """Initialize a MomentsAccountant.

    Args:
      total_examples: total number of examples.
      moment_orders: the order of moments to keep.
    """

    assert total_examples > 0
    self._total_examples = total_examples
    self._moment_orders = (moment_orders
                           if isinstance(moment_orders, (list, tuple))
                           else range(1, moment_orders + 1))
    self._max_moment_order = max(self._moment_orders)
    assert self._max_moment_order < 100, "The moment order is too large."
    self._log_moments = [tf.Variable(numpy.float64(0.0),
                                     trainable=False,
                                     name=("log_moments-%d" % moment_order))
                         for moment_order in self._moment_orders] 
Example #3
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, input_shape):
        batch_size, rows, cols, input_channels = input_shape
        kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                                   self.output_channels)
        assert len(kernel_shape) == 4
        assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
        init = tf.random_normal(kernel_shape, dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                                   axis=(0, 1, 2)))
        self.kernels = tf.Variable(init)
        self.b = tf.Variable(
            np.zeros((self.output_channels,)).astype('float32'))
        input_shape = list(input_shape)
        input_shape[0] = 1
        dummy_batch = tf.zeros(input_shape)
        dummy_output = self.fprop(dummy_batch)
        output_shape = [int(e) for e in dummy_output.get_shape()]
        output_shape[0] = batch_size
        self.output_shape = tuple(output_shape) 
Example #4
Source File: deep_cnn.py    From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
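The helper stores the decay term in a graph collection rather than returning it. A minimal, self-contained sketch of how such terms are typically folded into the total loss (the shape, coefficient, and stand-in data loss here are illustrative assumptions, since _variable_on_cpu is not shown):

import tensorflow as tf

weights = tf.get_variable(
    'weights', shape=[784, 10],
    initializer=tf.truncated_normal_initializer(stddev=0.04))
# Stash the L2 penalty in the 'losses' collection, as the helper does.
weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
tf.add_to_collection('losses', weight_decay)

# After the data loss is added as well, the total loss is the sum.
data_loss = tf.constant(1.23, name='cross_entropy')  # stand-in value
tf.add_to_collection('losses', data_loss)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')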
Example #5
Source File: variables.py    From DOTA_models with Apache License 2.0
def get_unique_variable(name):
  """Gets the variable uniquely identified by that name.

  Args:
    name: a name that uniquely identifies the variable.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
  if not candidates:
    raise ValueError("Couldn't find variable %s" % name)

  for candidate in candidates:
    if candidate.op.name == name:
      return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' % name) 
Example #6
Source File: resnet_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _decay(self):
        """L2 weight decay loss."""
        if self.decay_cost is not None:
            return self.decay_cost

        costs = []
        if self.device_name is None:
            for var in tf.trainable_variables():
                if var.op.name.find(r'DW') > 0:
                    costs.append(tf.nn.l2_loss(var))
        else:
            for layer in self.layers:
                for var in layer.params_device[self.device_name].values():
                    if (isinstance(var, tf.Variable) and
                            var.op.name.find(r'DW') > 0):
                        costs.append(tf.nn.l2_loss(var))

        self.decay_cost = tf.multiply(self.hps.weight_decay_rate,
                                      tf.add_n(costs))
        return self.decay_cost 
Example #7
Source File: graph_builder.py    From DOTA_models with Apache License 2.0
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
  return tf.train.exponential_decay(
      base_rate,
      step_var,
      hyperparams.decay_steps,
      hyperparams.decay_base,
      staircase=hyperparams.decay_staircase) 
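A minimal sketch of the schedule this returns, with made-up hyperparameter values; the decayed rate follows base_rate * decay_base^(step / decay_steps):

import tensorflow as tf

global_step = tf.Variable(0, trainable=False, name='global_step')
learning_rate = tf.train.exponential_decay(
    learning_rate=0.1,   # base_rate
    global_step=global_step,
    decay_steps=1000,    # hyperparams.decay_steps
    decay_rate=0.96,     # hyperparams.decay_base
    staircase=True)      # hyperparams.decay_staircase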
Example #8
Source File: cifar10.py    From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #9
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker/device:GPU:0')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '') 
Example #10
Source File: dataset.py    From disentangling_conditional_gans with MIT License
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
        self.resolution         = resolution
        self.resolution_log2    = int(np.log2(resolution))
        self.shape              = [num_channels, resolution, resolution]
        self.dtype              = dtype
        self.dynamic_range      = dynamic_range
        self.label_size         = label_size
        self.label_dtype        = label_dtype
        self._tf_minibatch_var  = None
        self._tf_lod_var        = None
        self._tf_minibatch_np   = None
        self._tf_labels_np      = None

        assert self.resolution == 2 ** self.resolution_log2
        with tf.name_scope('Dataset'):
            self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
            self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var') 
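Control variables like these are typically written from the outer training loop. A minimal sketch of that pattern using tf.assign (the names and the value 64 are illustrative):

import numpy as np
import tensorflow as tf

minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
set_minibatch = tf.assign(minibatch_var, 64)  # op that updates the variable

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(set_minibatch)
    print(sess.run(minibatch_var))  # -> 64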
Example #11
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call. 
Example #12
Source File: model.py    From Neural-LP with MIT License
def _build_input(self):
        self.tails = tf.placeholder(tf.int32, [None])
        self.heads = tf.placeholder(tf.int32, [None])
        self.targets = tf.one_hot(indices=self.heads, depth=self.num_entity)
            
        if not self.query_is_language:
            self.queries = tf.placeholder(tf.int32, [None, self.num_step])
            self.query_embedding_params = tf.Variable(self._random_uniform_unit(
                                                          self.num_query + 1, # <END> token 
                                                          self.query_embed_size), 
                                                      dtype=tf.float32)
        
            rnn_inputs = tf.nn.embedding_lookup(self.query_embedding_params, 
                                                self.queries)
        else:
            self.queries = tf.placeholder(tf.int32, [None, self.num_step, self.num_word])
            self.vocab_embedding_params = tf.Variable(self._random_uniform_unit(
                                                          self.num_vocab + 1, # <END> token
                                                          self.vocab_embed_size),
                                                      dtype=tf.float32)
            embedded_query = tf.nn.embedding_lookup(self.vocab_embedding_params, 
                                                    self.queries)
            rnn_inputs = tf.reduce_mean(embedded_query, axis=2)

        return rnn_inputs 
Example #13
Source File: mnist_histogram.py    From deep-learning-note with MIT License
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    # Group each layer of the network under a single, unified name scope
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            # Weights and their monitoring summaries
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
            variable_summaries(weights, layer_name+'/weights')

        with tf.name_scope('biases'):
            # Biases and their monitoring summaries
            biases = tf.Variable(tf.constant(0.0, shape=[output_dim]))
            variable_summaries(biases, layer_name + '/biases')

        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            # Record the distribution of this layer's outputs before the activation function
            tf.summary.histogram(layer_name + '/pre_activations', preactivate)
        
        activations = act(preactivate, name='activation')
        # Record the distribution of this layer's outputs after the activation function
        tf.summary.histogram(layer_name + '/activations', activations)
        return activations 
Example #14
Source File: 2_tf_linear.py    From deep-learning-note with MIT License
def createLinearModel(dimension):
    np.random.seed(1024)
    # Define x and y
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    # Writing this in matrix form greatly speeds up the computation
    y = tf.placeholder(tf.float64, shape=[None, 1], name='y')
    # Define the parameter estimates and the prediction
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name='y_pred')
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
    model = {
        'loss_function': loss,
        'independent_variable': x,
        'dependent_variable': y,
        'prediction': yPred,
        'model_params': betaPred
    }
    return model 
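A hedged sketch of how the returned model dict might be driven in a training loop; the optimizer, step count, and synthetic data are illustrative assumptions, not part of the original file:

import numpy as np
import tensorflow as tf

model = createLinearModel(dimension=3)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
    model['loss_function'])

# Synthetic regression data: y = X.dot(ones) + noise.
X = np.random.random([100, 3])
y = X.dot(np.ones([3, 1])) + np.random.normal(0, 0.1, [100, 1])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        _, loss = sess.run(
            [train_op, model['loss_function']],
            feed_dict={model['independent_variable']: X,
                       model['dependent_variable']: y})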
Example #15
Source File: batcher_test.py    From DOTA_models with Apache License 2.0
def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(1, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 1)
      image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([4, 3], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 1
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i)
              i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch) 
Example #16
Source File: shape_utils.py    From DOTA_models with Apache License 2.0
def _is_tensor(t):
  """Returns a boolean indicating whether the input is a tensor.

  Args:
    t: the input to be tested.

  Returns:
    a boolean that indicates whether t is a tensor.
  """
  return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable)) 
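For instance, assuming the helper above is in scope (illustrative calls):

import tensorflow as tf

print(_is_tensor(tf.constant(1.0)))  # True (tf.Tensor)
print(_is_tensor(tf.Variable(0)))    # True (tf.Variable)
print(_is_tensor([1.0, 2.0]))        # False (plain Python list)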
Example #17
Source File: batcher_test.py    From DOTA_models with Apache License 2.0
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions(
      self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      image = tf.reshape(
          tf.range(counter * counter), tf.stack([counter, counter]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, None], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
              i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch) 
Example #18
Source File: batcher_test.py    From DOTA_models with Apache License 2.0
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      boxes = tf.tile(
          tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'boxes': boxes},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, 4], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
              i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch) 
Example #19
Source File: generate_itb_data.py    From DOTA_models with Apache License 2.0
def __init__(self, N):
    scale = 0.8 / float(N**0.5)
    self.N = N
    self.Wh_nxn = tf.Variable(tf.random_normal([N, N], stddev=scale))
    self.b_1xn = tf.Variable(tf.zeros([1, N]))
    self.Bu_1xn = tf.Variable(tf.zeros([1, N]))
    self.Wro_nxo = tf.Variable(tf.random_normal([N, 1], stddev=scale))
    self.bro_o = tf.Variable(tf.zeros([1])) 
Example #20
Source File: show_and_tell_model.py    From DOTA_models with Apache License 2.0
def setup_global_step(self):
    """Sets up the global step Tensor."""
    global_step = tf.Variable(
        initial_value=0,
        name="global_step",
        trainable=False,
        collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])

    self.global_step = global_step 
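A minimal sketch of how such a global step is typically consumed: passing it to minimize() makes the optimizer increment it once per update (the toy loss below is an illustrative assumption):

import tensorflow as tf

global_step = tf.Variable(0, name='global_step', trainable=False)
loss = tf.square(tf.Variable(3.0) - 1.0)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(global_step))  # -> 1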
Example #21
Source File: batcher_test.py    From DOTA_models with Apache License 2.0
def test_batcher_when_batch_size_is_one(self):
    with self.test_session() as sess:
      batch_size = 1
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      image = tf.reshape(
          tf.range(counter * counter), tf.stack([counter, counter]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, None], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
              i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch) 
Example #22
Source File: 17_conv_mnist.py    From deep-learning-note with MIT License
def __init__(self):
        self.lr = 0.001
        self.batch_size = 128
        self.keep_prob = tf.constant(0.75)
        self.gstep = tf.Variable(0, dtype=tf.int32,
                                 trainable=False, name='global_step')
        self.n_classes = 10
        self.skip_step = 20
        self.n_test = 10000
        self.training = True 
Example #23
Source File: accountant.py    From DOTA_models with Apache License 2.0
def __init__(self, total_examples):
    """Initialization. Currently only support amortized tracking.

    Args:
      total_examples: total number of examples.
    """

    assert total_examples > 0
    self._total_examples = total_examples
    self._eps_squared_sum = tf.Variable(tf.zeros([1]), trainable=False,
                                        name="eps_squared_sum")
    self._delta_sum = tf.Variable(tf.zeros([1]), trainable=False,
                                  name="delta_sum") 
Example #24
Source File: component.py    From DOTA_models with Apache License 2.0
def get_variable(self, var_name=None, var_params=None):
    """Returns either the original or averaged version of a given variable.

    If the master.read_from_avg flag is set to True, and the
    ExponentialMovingAverage (EMA) object has been attached, then this will ask
    the EMA object for the given variable.

    This is to allow executing inference from the averaged version of
    parameters.

    Arguments:
      var_name: Name of the variable.
      var_params: tf.Variable for which to retrieve an average.

    Only one of |var_name| or |var_params| needs to be provided.  If both are
    provided, |var_params| takes precedence.

    Returns:
      tf.Variable object corresponding to original or averaged version.
    """
    if var_params:
      var_name = var_params.name
    else:
      check.NotNone(var_name, 'specify at least one of var_name or var_params')
      var_params = tf.get_variable(var_name)

    if self.moving_average and self.master.read_from_avg:
      logging.info('Retrieving average for: %s', var_name)
      var_params = self.moving_average.average(var_params)
      assert var_params
    logging.info('Returning: %s', var_params.name)
    return var_params 
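The moving_average attribute here is a tf.train.ExponentialMovingAverage attached elsewhere in the project. A minimal sketch of the EMA mechanics this method relies on (the variable, decay value, and update are illustrative): apply() creates the shadow variables, and average() retrieves the averaged version.

import tensorflow as tf

var = tf.Variable(0.0, name='my_weight')
ema = tf.train.ExponentialMovingAverage(decay=0.999)
maintain_op = ema.apply([var])  # creates the shadow variable
averaged = ema.average(var)     # averaged version of var

update = tf.assign(var, 1.0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update)
    sess.run(maintain_op)       # shadow = 0.999*shadow + 0.001*var
    print(sess.run(averaged))   # ~0.001, still close to 0.0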
Example #25
Source File: alexnet_benchmark.py    From DOTA_models with Apache License 2.0
def run_benchmark():
  """Run the benchmark on AlexNet."""
  with tf.Graph().as_default():
    # Generate some dummy images.
    image_size = 224
    # Note that our padding definition is slightly different from cuda-convnet's.
    # In order to force the model to start with the same activations sizes,
    # we add 3 to the image_size and employ VALID padding above.
    images = tf.Variable(tf.random_normal([FLAGS.batch_size,
                                           image_size,
                                           image_size, 3],
                                          dtype=tf.float32,
                                          stddev=1e-1))

    # Build a Graph that computes the logits predictions from the
    # inference model.
    pool5, parameters = inference(images)

    # Build an initialization operation.
    init = tf.global_variables_initializer()

    # Start running operations on the Graph.
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=config)
    sess.run(init)

    # Run the forward benchmark.
    time_tensorflow_run(sess, pool5, "Forward")

    # Add a simple objective so we can calculate the backward pass.
    objective = tf.nn.l2_loss(pool5)
    # Compute the gradient with respect to all the parameters.
    grad = tf.gradients(objective, parameters)
    # Run the backward benchmark.
    time_tensorflow_run(sess, grad, "Forward-backward") 
Example #26
Source File: seq2seq_attention_model.py    From DOTA_models with Apache License 2.0
def build_graph(self):
    self._add_placeholders()
    self._add_seq2seq()
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    if self._hps.mode == 'train':
      self._add_train_op()
    self._summaries = tf.summary.merge_all() 
Example #27
Source File: actor.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License
def __init__(self, config):
        self.config=config

        # Data config
        self.batch_size = config.batch_size # batch size
        self.max_length = config.max_length # input sequence length (number of cities)
        self.input_dimension = config.input_dimension # dimension of a city (coordinates)

        # Reward config
        self.avg_baseline = tf.Variable(config.init_baseline, trainable=False, name="moving_avg_baseline") # moving baseline for Reinforce
        self.alpha = config.alpha # moving average update

        # Training config (actor)
        self.global_step= tf.Variable(0, trainable=False, name="global_step") # global step
        self.lr1_start = config.lr1_start # initial learning rate
        self.lr1_decay_rate= config.lr1_decay_rate # learning rate decay rate
        self.lr1_decay_step= config.lr1_decay_step # learning rate decay step

        # Training config (critic)
        self.global_step2 = tf.Variable(0, trainable=False, name="global_step2") # global step
        self.lr2_start = config.lr1_start # initial learning rate
        self.lr2_decay_rate= config.lr1_decay_rate # learning rate decay rate
        self.lr2_decay_step= config.lr1_decay_step # learning rate decay step

        # Tensor block holding the input sequences [Batch Size, Sequence Length, Features]
        self.input_ = tf.placeholder(tf.float32, [self.batch_size, self.max_length, self.input_dimension], name="input_coordinates")

        self.build_permutation()
        self.build_critic()
        self.build_reward()
        self.build_optim()
        self.merged = tf.summary.merge_all() 
Example #28
Source File: critic.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License
def predict_rewards(self,input_):

        with tf.variable_scope("encoder"):

            Encoder = Attentive_encoder(self.config)
            encoder_output = Encoder.encode(input_)
            frame = tf.reduce_mean(encoder_output, 1) # [Batch size, Sequence Length, Num_neurons] to [Batch size, Num_neurons]

        with tf.variable_scope("ffn"):
            # ffn 1
            h0 = tf.layers.dense(frame, self.num_neurons, activation=tf.nn.relu, kernel_initializer=self.initializer)
            # ffn 2
            w1 =tf.get_variable("w1", [self.num_neurons, 1], initializer=self.initializer)
            b1 = tf.Variable(self.init_baseline, name="b1")
            self.predictions = tf.squeeze(tf.matmul(h0, w1)+b1) 
Example #29
Source File: actor.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License
def __init__(self, config):
        self.config=config

        # Data config
        self.batch_size = config.batch_size # batch size
        self.max_length = config.max_length # input sequence length (number of cities)
        self.input_dimension = config.input_dimension # dimension of a city (coordinates)
        self.speed = config.speed # agent's speed

        # Network config
        self.input_embed = config.input_embed # dimension of embedding space
        self.num_neurons = config.hidden_dim # dimension of hidden states (LSTM cell)
        self.initializer = tf.contrib.layers.xavier_initializer() # variables initializer

        # Reward config
        self.beta = config.beta # penalty for constraint

        # Training config (actor)
        self.global_step = tf.Variable(0, trainable=False, name="global_step") # global step
        self.lr1_start = config.lr1_start # initial learning rate
        self.lr1_decay_rate = config.lr1_decay_rate # learning rate decay rate
        self.lr1_decay_step = config.lr1_decay_step # learning rate decay step
        self.is_training = not config.inference_mode

        # Training config (critic)
        self.global_step2 = tf.Variable(0, trainable=False, name="global_step2") # global step
        self.lr2_start = config.lr1_start # initial learning rate
        self.lr2_decay_rate= config.lr1_decay_rate # learning rate decay rate
        self.lr2_decay_step= config.lr1_decay_step # learning rate decay step

        # Tensor block holding the input sequences [Batch Size, Sequence Length, Features]
        self.input_ = tf.placeholder(tf.float32, [self.batch_size, self.max_length+1, self.input_dimension+2], name="input_raw")  # +1 for depot / +2 for TW mean and TW width

        self.build_permutation()
        self.build_critic()
        self.build_reward()
        self.build_optim()
        self.merged = tf.summary.merge_all() 
Example #30
Source File: 11_w2v_visual.py    From deep-learning-note with MIT License
def visualize(self, visual_fld, num_visualize):
        """ run "'tensorboard --logdir='visualization'" to see the embeddings """

        # create the list of num_variable most common words to visualize
        w2v_utils.most_common_words(visual_fld, num_visualize)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('data/checkpoints/checkpoint'))

            # if that checkpoint exists, restore from checkpoint
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            final_embed_matrix = sess.run(self.embed_matrix)

            # you have to store embeddings in a new variable
            embedding_var = tf.Variable(final_embed_matrix[:num_visualize], name='embedding')
            sess.run(embedding_var.initializer)

            config = projector.ProjectorConfig()
            summary_writer = tf.summary.FileWriter(visual_fld)

            # add embedding to the config file
            embedding = config.embeddings.add()
            embedding.tensor_name = embedding_var.name

            # link this tensor to its metadata file, in this case the first NUM_VISUALIZE words of vocab
            embedding.metadata_path = 'vocab_' + str(num_visualize) + '.tsv'

            # saves a configuration file that TensorBoard will read during startup.
            projector.visualize_embeddings(summary_writer, config)
            saver_embed = tf.train.Saver([embedding_var])
            saver_embed.save(sess, os.path.join(visual_fld, 'model.ckpt'), 1)