Python tensorflow.fixed_size_partitioner() Examples

The following are 16 code examples of tensorflow.fixed_size_partitioner(), drawn from open-source projects. The source file, originating project, and license are listed above each example.
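As quick orientation before the project examples: tf.fixed_size_partitioner(num_shards, axis=0) returns a partitioner callable that a variable scope applies to every variable created inside it, splitting each variable into num_shards shards along the given axis. A minimal TF1-style sketch (the scope name, variable name, and shape are illustrative, not taken from any project below):

import tensorflow as tf

def build_partitioned_variable():
    # Split the (1000, 64) variable into 4 shards of shape (250, 64) along axis 0.
    partitioner = tf.fixed_size_partitioner(num_shards=4, axis=0)
    with tf.variable_scope("demo", partitioner=partitioner):
        return tf.get_variable(
            "embedding_matrix", shape=[1000, 64], dtype=tf.float32)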
Example #1
Source File: partitions.py    From parallax with Apache License 2.0
def get_partitioner(min_num_partitions):
    """Return tf.fixed_size_partitioner with num_partitions
       that determined by Parallax.
   
    Args:
      min_num_partitions: A minimum (default) number of partitions 
                          without memory exception.
    """

    if PARALLAX_MIN_PARTITIONS not in os.environ:
        os.environ[PARALLAX_MIN_PARTITIONS] = str(min_num_partitions)

    if PARALLAX_PARTITIONS in os.environ:
        partitions = int(os.environ[PARALLAX_PARTITIONS])
    else:
        partitions = min_num_partitions
    return tf.fixed_size_partitioner(partitions) 
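In use, setting the PARALLAX_PARTITIONS environment variable before graph construction overrides the caller-supplied default, so the partition count can be tuned per run without code changes; otherwise min_num_partitions is used, and it is also recorded under PARALLAX_MIN_PARTITIONS (if not already set) for later reference.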
Example #2
Source File: graph_regularization_test.py    From neural-structured-learning with Apache License 2.0
def build_linear_regressor(self, weight, weight_shape, bias, bias_shape):
    with tf.Graph().as_default():
      # Use a partitioner that is known a priori because canned Estimators
      # default to using one otherwise. This allows tests to access variables
      # used in the underlying Estimator.
      tf.get_variable(
          name=WEIGHT_VARIABLE,
          shape=weight_shape,
          initializer=weight,
          partitioner=tf.fixed_size_partitioner(1))
      tf.get_variable(
          name=BIAS_VARIABLE,
          shape=bias_shape,
          initializer=bias,
          partitioner=tf.fixed_size_partitioner(1))
      tf.Variable(100, name=tf.GraphKeys.GLOBAL_STEP, dtype=tf.int64)

      with tf.Session() as sess:
        sess.run([tf.global_variables_initializer()])
        tf.train.Saver().save(sess, os.path.join(self.model_dir, 'model.ckpt'))

    fc = tf.feature_column.numeric_column(
        FEATURE_NAME, shape=np.array(weight).shape)
    return tf.estimator.LinearRegressor(
        feature_columns=(fc,), model_dir=self.model_dir, optimizer='SGD') 
Example #3
Source File: variables_helper_test.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def test_return_all_variables_from_checkpoint_with_partition(self):
    with tf.Graph().as_default():
      partitioner = tf.fixed_size_partitioner(2)
      variables = [
          tf.get_variable(
              name='weights', shape=(2, 2), partitioner=partitioner),
          tf.Variable([1.0, 2.0], name='biases')
      ]
      checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
      init_op = tf.global_variables_initializer()
      saver = tf.train.Saver(variables)
      with self.test_session() as sess:
        sess.run(init_op)
        saver.save(sess, checkpoint_path)
      out_variables = variables_helper.get_variables_available_in_checkpoint(
          variables, checkpoint_path)
    self.assertItemsEqual(out_variables, variables) 
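The assertion reflects how partitioned variables round-trip through checkpoints: the two-shard 'weights' variable written by the Saver is matched against the checkpoint just like the plain 'biases' Variable, so get_variables_available_in_checkpoint returns both.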
Example #4
Source File: op_regularizer_manager_test.py    From g-tensorflow-models with Apache License 2.0
def testSimpleOpGetRegularizer(self, use_batch_norm, use_partitioner, scope):
    # Tests the alive pattern of the conv and relu ops.
    # use_batch_norm: A Boolean. Indicates whether batch norm should be used.
    # use_partitioner: A Boolean. Indicates whether a fixed_size_partitioner
    #   should be used.
    # scope: A String with the scope to test.
    sc = self._batch_norm_scope() if use_batch_norm else []
    partitioner = tf.fixed_size_partitioner(2) if use_partitioner else None
    with tf.contrib.framework.arg_scope(sc):
      with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):
        final_op = op_regularizer_stub.build_model()

    op_reg_manager = orm.OpRegularizerManager([final_op],
                                              op_regularizer_stub.MOCK_REG_DICT)
    expected_alive = op_regularizer_stub.expected_alive()
    with self.test_session():
      conv_reg = op_reg_manager.get_regularizer(_get_op(scope + '/Conv2D'))
      self.assertAllEqual(expected_alive[scope],
                          conv_reg.alive_vector.eval())

      relu_reg = op_reg_manager.get_regularizer(_get_op(scope + '/Relu'))
      self.assertAllEqual(expected_alive[scope],
                          relu_reg.alive_vector.eval()) 
Example #5
Source File: op_regularizer_manager_test.py    From g-tensorflow-models with Apache License 2.0
def testConcatOpGetRegularizer(self, use_batch_norm, use_partitioner):
    sc = self._batch_norm_scope() if use_batch_norm else []
    partitioner = tf.fixed_size_partitioner(2) if use_partitioner else None
    with tf.contrib.framework.arg_scope(sc):
      with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):
        final_op = op_regularizer_stub.build_model()
    op_reg_manager = orm.OpRegularizerManager([final_op],
                                              op_regularizer_stub.MOCK_REG_DICT)
    expected_alive = op_regularizer_stub.expected_alive()

    expected = np.logical_or(expected_alive['conv4'],
                             expected_alive['concat'])
    with self.test_session():
      conv_reg = op_reg_manager.get_regularizer(_get_op('conv4/Conv2D'))
      self.assertAllEqual(expected, conv_reg.alive_vector.eval())

      relu_reg = op_reg_manager.get_regularizer(_get_op('conv4/Relu'))
      self.assertAllEqual(expected, relu_reg.alive_vector.eval()) 
Example #6
Source File: op_regularizer_manager_test.py    From multilabel-image-classification-tensorflow with MIT License
def testSimpleOpGetRegularizer(self, use_batch_norm, use_partitioner, scope):
    # Tests the alive pattern of the conv and relu ops.
    # use_batch_norm: A Boolean. Indicates whether batch norm should be used.
    # use_partitioner: A Boolean. Indicates whether a fixed_size_partitioner
    #   should be used.
    # scope: A String with the scope to test.
    sc = self._batch_norm_scope() if use_batch_norm else []
    partitioner = tf.fixed_size_partitioner(2) if use_partitioner else None
    with tf.contrib.framework.arg_scope(sc):
      with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):
        final_op = op_regularizer_stub.build_model()

    op_reg_manager = orm.OpRegularizerManager([final_op],
                                              op_regularizer_stub.MOCK_REG_DICT)
    expected_alive = op_regularizer_stub.expected_alive()
    with self.test_session():
      conv_reg = op_reg_manager.get_regularizer(_get_op(scope + '/Conv2D'))
      self.assertAllEqual(expected_alive[scope],
                          conv_reg.alive_vector.eval())

      relu_reg = op_reg_manager.get_regularizer(_get_op(scope + '/Relu'))
      self.assertAllEqual(expected_alive[scope],
                          relu_reg.alive_vector.eval()) 
Example #7
Source File: op_regularizer_manager_test.py    From multilabel-image-classification-tensorflow with MIT License
def testConcatOpGetRegularizer(self, use_batch_norm, use_partitioner):
    sc = self._batch_norm_scope() if use_batch_norm else []
    partitioner = tf.fixed_size_partitioner(2) if use_partitioner else None
    with tf.contrib.framework.arg_scope(sc):
      with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):
        final_op = op_regularizer_stub.build_model()
    op_reg_manager = orm.OpRegularizerManager([final_op],
                                              op_regularizer_stub.MOCK_REG_DICT)
    expected_alive = op_regularizer_stub.expected_alive()

    expected = np.logical_or(expected_alive['conv4'],
                             expected_alive['concat'])
    with self.test_session():
      conv_reg = op_reg_manager.get_regularizer(_get_op('conv4/Conv2D'))
      self.assertAllEqual(expected, conv_reg.alive_vector.eval())

      relu_reg = op_reg_manager.get_regularizer(_get_op('conv4/Relu'))
      self.assertAllEqual(expected, relu_reg.alive_vector.eval()) 
Example #8
Source File: model_helper.py    From nslt with Apache License 2.0
def create_emb_for_encoder_and_decoder(tgt_vocab_size, tgt_embed_size, dtype=tf.float32, num_partitions=0, scope=None):
    """Create the embedding matrix for the decoder.

    Args:
      tgt_vocab_size: An integer. The target vocab size.
      tgt_embed_size: An integer. The embedding dimension for the decoder's
        embedding.
      dtype: dtype of the embedding matrix. Defaults to tf.float32.
      num_partitions: Number of partitions used for the embedding variables.
      scope: VariableScope for the created subgraph. Defaults to "embeddings".

    Returns:
      embedding_decoder: Decoder's embedding matrix.
    """

    if num_partitions <= 1:
        partitioner = None
    else:
        partitioner = tf.fixed_size_partitioner(num_partitions)

    with tf.variable_scope(scope or "embeddings", dtype=dtype, partitioner=partitioner) as scope:
        with tf.variable_scope("decoder", partitioner=partitioner):
            embedding_decoder = tf.get_variable("embedding_decoder", [tgt_vocab_size, tgt_embed_size], dtype)

    return embedding_decoder 
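A hypothetical call, with illustrative sizes: with num_partitions=4, the (1000, 128) decoder embedding is created as four (250, 128) shards under the "embeddings/decoder" scope; with num_partitions <= 1 it remains a single unpartitioned variable.

embedding_decoder = create_emb_for_encoder_and_decoder(
    tgt_vocab_size=1000, tgt_embed_size=128, num_partitions=4)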
Example #9
Source File: partitioned_variables_test.py    From deep_image_model with Apache License 2.0
def testFixedSizePartitioner(self):
    with self.test_session():
      partitioner = tf.fixed_size_partitioner(5, axis=0)
      with tf.variable_scope("root", partitioner=partitioner):
        v0 = tf.get_variable("v0", dtype=tf.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1)) 
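The private accessors used here expose the sharding directly: _get_variable_list() returns the five underlying shard variables, and _get_partitions() reports the shard count per axis, so (5, 1) means the (10, 10) variable is split into five (2, 10) pieces along axis 0 and left whole along axis 1.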
Example #10
Source File: layers_test.py    From deep_image_model with Apache License 2.0
def testEvalMovingVarsWithPartitioner(self):
    # This test makes sure that the moving-mean and moving-variance logic works
    # when `batch_norm` is called within a variable-scope that has a variable
    # partitioner.
    partitioner = tf.fixed_size_partitioner(2, axis=0)
    with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):
      self.testEvalMovingVars() 
Example #11
Source File: gamma_mapper_test.py    From g-tensorflow-models with Apache License 2.0
def TestSuccess(self, connectivity, partitioning, fused, use_resource):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True,
            'fused': fused
        }
    }

    partitioner = tf.fixed_size_partitioner(2) if partitioning else None
    with tf.variable_scope(
        tf.get_variable_scope(),
        partitioner=partitioner,
        use_resource=use_resource):
      with tf.contrib.framework.arg_scope(
          [layers.conv2d, layers.separable_conv2d], **params):
        build_model()

    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(FLAGS.test_tmpdir, CKPT_FILE_NAME))
    mapper = self.createMapper(connectivity)
    conv = get_op('conv1/Conv2D')
    sep_conv = get_op('sep_conv/separable_conv2d')
    with sess.as_default():
      self.assertAllClose(CONV1_GAMMA, mapper.get_gamma(conv).eval())
      self.assertAllClose(SEP_CONV_GAMMA, mapper.get_gamma(sep_conv).eval()) 
Example #12
Source File: gamma_mapper_test.py    From g-tensorflow-models with Apache License 2.0
def testNoBatchNorm(self, connectivity, partitioning):
    partitioner = tf.fixed_size_partitioner(2) if partitioning else None
    with tf.variable_scope(
        tf.get_variable_scope(), partitioner=partitioner):
      build_model()
    mapper = self.createMapper(connectivity)
    conv = get_op('conv1/Conv2D')
    self.assertEqual(None, mapper.get_gamma(conv)) 
Example #13
Source File: gamma_mapper_test.py    From multilabel-image-classification-tensorflow with MIT License
def TestSuccess(self, connectivity, partitioning, fused, use_resource):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True,
            'fused': fused
        }
    }

    partitioner = tf.fixed_size_partitioner(2) if partitioning else None
    with tf.variable_scope(
        tf.get_variable_scope(),
        partitioner=partitioner,
        use_resource=use_resource):
      with tf.contrib.framework.arg_scope(
          [layers.conv2d, layers.separable_conv2d], **params):
        build_model()

    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(FLAGS.test_tmpdir, CKPT_FILE_NAME))
    mapper = self.createMapper(connectivity)
    conv = get_op('conv1/Conv2D')
    sep_conv = get_op('sep_conv/separable_conv2d')
    with sess.as_default():
      self.assertAllClose(CONV1_GAMMA, mapper.get_gamma(conv).eval())
      self.assertAllClose(SEP_CONV_GAMMA, mapper.get_gamma(sep_conv).eval()) 
Example #14
Source File: gamma_mapper_test.py    From multilabel-image-classification-tensorflow with MIT License
def testNoBatchNorm(self, connectivity, partitioning):
    partitioner = tf.fixed_size_partitioner(2) if partitioning else None
    with tf.variable_scope(
        tf.get_variable_scope(), partitioner=partitioner):
      build_model()
    mapper = self.createMapper(connectivity)
    conv = get_op('conv1/Conv2D')
    self.assertEqual(None, mapper.get_gamma(conv)) 
Example #15
Source File: graph_regularization_test.py    From neural-structured-learning with Apache License 2.0
def _train_and_check_params(self, example, max_neighbors, weight, bias,
                              expected_grad_from_weight,
                              expected_grad_from_bias):
    """Runs training for one step and verifies gradient-based updates."""

    def embedding_fn(features, unused_mode):
      # Computes y = w*x
      with tf.variable_scope(
          tf.get_variable_scope(),
          reuse=tf.AUTO_REUSE,
          auxiliary_name_scope=False):
        weight_tensor = tf.reshape(
            tf.get_variable(
                WEIGHT_VARIABLE,
                shape=[2, 1],
                partitioner=tf.fixed_size_partitioner(1)),
            shape=[-1, 2])

      x_tensor = tf.reshape(features[FEATURE_NAME], shape=[-1, 2])
      return tf.reduce_sum(
          tf.multiply(weight_tensor, x_tensor), 1, keep_dims=True)

    def optimizer_fn():
      return tf.train.GradientDescentOptimizer(LEARNING_RATE)

    base_est = self.build_linear_regressor(
        weight=weight, weight_shape=[2, 1], bias=bias, bias_shape=[1])

    graph_reg_config = nsl_configs.make_graph_reg_config(
        max_neighbors=max_neighbors, multiplier=1)
    graph_reg_est = nsl_estimator.add_graph_regularization(
        base_est, embedding_fn, optimizer_fn, graph_reg_config=graph_reg_config)

    input_fn = single_example_input_fn(
        example, input_shape=[2], max_neighbors=max_neighbors)
    graph_reg_est.train(input_fn=input_fn, steps=1)

    # Compute the new bias and weight values based on the gradients.
    expected_bias = bias - LEARNING_RATE * (expected_grad_from_bias)
    expected_weight = weight - LEARNING_RATE * (expected_grad_from_weight)

    # Check that the parameters of the linear regressor have the correct values.
    self.assertAllClose(expected_bias,
                        graph_reg_est.get_variable_value(BIAS_VARIABLE))
    self.assertAllClose(expected_weight,
                        graph_reg_est.get_variable_value(WEIGHT_VARIABLE)) 
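Note the single-shard partitioner in embedding_fn: tf.fixed_size_partitioner(1) matches the partitioner used when build_linear_regressor wrote the checkpoint, so under tf.AUTO_REUSE the variable scope resolves WEIGHT_VARIABLE to the regressor's existing partitioned variable instead of creating a new one.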
Example #16
Source File: graph_regularization_test.py    From neural-structured-learning with Apache License 2.0
def _train_and_check_eval_results(self, train_example, test_example,
                                    max_neighbors, weight, bias):
    """Verifies evaluation results for the graph-regularized model."""

    def embedding_fn(features, unused_mode):
      # Computes y = w*x
      with tf.variable_scope(
          tf.get_variable_scope(),
          reuse=tf.AUTO_REUSE,
          auxiliary_name_scope=False):
        weight_tensor = tf.reshape(
            tf.get_variable(
                WEIGHT_VARIABLE,
                shape=[2, 1],
                partitioner=tf.fixed_size_partitioner(1)),
            shape=[-1, 2])

      x_tensor = tf.reshape(features[FEATURE_NAME], shape=[-1, 2])
      return tf.reduce_sum(
          tf.multiply(weight_tensor, x_tensor), 1, keep_dims=True)

    def optimizer_fn():
      return tf.train.GradientDescentOptimizer(LEARNING_RATE)

    base_est = self.build_linear_regressor(
        weight=weight, weight_shape=[2, 1], bias=bias, bias_shape=[1])

    graph_reg_config = nsl_configs.make_graph_reg_config(
        max_neighbors=max_neighbors, multiplier=1)
    graph_reg_est = nsl_estimator.add_graph_regularization(
        base_est, embedding_fn, optimizer_fn, graph_reg_config=graph_reg_config)

    train_input_fn = single_example_input_fn(
        train_example, input_shape=[2], max_neighbors=max_neighbors)
    graph_reg_est.train(input_fn=train_input_fn, steps=1)

    # Evaluating the graph-regularized model should yield the same results
    # as evaluating the base model because model parameters are shared.
    eval_input_fn = single_example_input_fn(
        test_example, input_shape=[2], max_neighbors=0)
    graph_eval_results = graph_reg_est.evaluate(input_fn=eval_input_fn)
    base_eval_results = base_est.evaluate(input_fn=eval_input_fn)
    self.assertAllClose(base_eval_results, graph_eval_results)