Python tensorflow.python.framework.ops.Graph() Examples

The following are 30 code examples of tensorflow.python.framework.ops.Graph(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.python.framework.ops, or try the search function.
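
Before the per-project examples, a minimal orientation sketch may help. This is a hedged example assuming a TensorFlow 1.x graph-mode install; it uses only the internal modules the examples below already import.

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops

# Build ops inside an explicit Graph instead of the process-wide default.
g = ops.Graph()
with g.as_default():
  c = constant_op.constant(3.0, name='c')

# The constant belongs to `g`, not to whatever graph is currently the default.
assert c.graph is g

# Launch the graph in a session to evaluate the tensor.
with session.Session(graph=g) as sess:
  print(sess.run(c))  # -> 3.0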
Example #1
Source File: util.py    From lambda-packs with MIT License
def make_placeholder_from_tensor(t, scope=None):
  """Create a `tf.placeholder` for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.

  Args:
    t: a `tf.Tensor` whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of `t` is preserved. `""` means the root scope.
  Returns:
    A newly created `tf.placeholder`.
  Raises:
    TypeError: if `t` is not a `tf.Tensor`.
  """
  return tf_array_ops.placeholder(
      dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(
          t, scope=scope)) 
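
A hedged usage sketch follows; it assumes the function is re-exported on the public tf.contrib.graph_editor namespace, as it was in TF 1.x.

import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

with tf.Graph().as_default():
  t = tf.constant([[1.0, 2.0]], name='t')
  # The placeholder inherits its dtype and shape from `t`.
  p = ge.make_placeholder_from_tensor(t)
  print(p.dtype, p.get_shape())  # -> <dtype: 'float32'> (1, 2)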
Example #2
Source File: vgg_test.py    From auto-alt-text-lambda-api with MIT License
def testEndPoints(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    for is_training in [True, False]:
      with ops.Graph().as_default():
        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        _, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)
        expected_names = [
            'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',
            'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
            'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',
            'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
            'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
        ]
        self.assertSetEqual(set(end_points.keys()), set(expected_names)) 
Example #3
Source File: resnet_v2_test.py    From auto-alt-text-lambda-api with MIT License
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
        with ops.Graph().as_default():
          with self.test_session() as sess:
            random_seed.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(
                inputs, None, global_pool=False, output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            variable_scope.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None, global_pool=False)
            sess.run(variables.global_variables_initializer())
            self.assertAllClose(
                output.eval(), expected.eval(), atol=1e-4, rtol=1e-4) 
Example #4
Source File: vgg_test.py    From auto-alt-text-lambda-api with MIT License
def testEndPoints(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    for is_training in [True, False]:
      with ops.Graph().as_default():
        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        _, end_points = vgg.vgg_16(inputs, num_classes, is_training=is_training)
        expected_names = [
            'vgg_16/conv1/conv1_1', 'vgg_16/conv1/conv1_2', 'vgg_16/pool1',
            'vgg_16/conv2/conv2_1', 'vgg_16/conv2/conv2_2', 'vgg_16/pool2',
            'vgg_16/conv3/conv3_1', 'vgg_16/conv3/conv3_2',
            'vgg_16/conv3/conv3_3', 'vgg_16/pool3', 'vgg_16/conv4/conv4_1',
            'vgg_16/conv4/conv4_2', 'vgg_16/conv4/conv4_3', 'vgg_16/pool4',
            'vgg_16/conv5/conv5_1', 'vgg_16/conv5/conv5_2',
            'vgg_16/conv5/conv5_3', 'vgg_16/pool5', 'vgg_16/fc6', 'vgg_16/fc7',
            'vgg_16/fc8'
        ]
        self.assertSetEqual(set(end_points.keys()), set(expected_names)) 
Example #5
Source File: inception_v3_test.py    From auto-alt-text-lambda-api with MIT License
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 299, 299
    endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
        'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
        'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
    ]

    for index, endpoint in enumerate(endpoints):
      with ops.Graph().as_default():
        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception_v3.inception_v3_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(
            out_tensor.op.name.startswith('InceptionV3/' + endpoint))
        self.assertItemsEqual(endpoints[:index + 1], end_points) 
Example #6
Source File: vgg_test.py    From auto-alt-text-lambda-api with MIT License
def testEndPoints(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    for is_training in [True, False]:
      with ops.Graph().as_default():
        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        _, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)
        expected_names = [
            'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2', 'vgg_19/pool1',
            'vgg_19/conv2/conv2_1', 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',
            'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',
            'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4', 'vgg_19/pool3',
            'vgg_19/conv4/conv4_1', 'vgg_19/conv4/conv4_2',
            'vgg_19/conv4/conv4_3', 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',
            'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',
            'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4', 'vgg_19/pool5',
            'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'
        ]
        self.assertSetEqual(set(end_points.keys()), set(expected_names)) 
Example #7
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertLess(loss, .1) 
Example #8
Source File: inception_v2_test.py    From auto-alt-text-lambda-api with MIT License
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 224, 224
    endpoints = [
        'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
        'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
        'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c'
    ]
    for index, endpoint in enumerate(endpoints):
      with ops.Graph().as_default():
        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception_v2.inception_v2_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(
            out_tensor.op.name.startswith('InceptionV2/' + endpoint))
        self.assertItemsEqual(endpoints[:index + 1], end_points) 
Example #9
Source File: session.py    From lambda-packs with MIT License
def __init__(self, target='', graph=None, config=None):
    """Creates a new TensorFlow session.

    If no `graph` argument is specified when constructing the session,
    the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
    process, you will have to use different sessions for each graph,
    but each graph can be used in multiple sessions. In this case, it
    is often clearer to pass the graph to be launched explicitly to
    the session constructor.

    Args:
      target: (Optional.) The execution engine to connect to.
        Defaults to using an in-process engine. See
        @{$distributed$Distributed TensorFlow}
        for more examples.
      graph: (Optional.) The `Graph` to be launched (described above).
      config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
        protocol buffer with configuration options for the session.

    """
    super(Session, self).__init__(target, graph, config=config)
    # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
    self._default_graph_context_manager = None
    self._default_session_context_manager = None 
Example #10
Source File: session.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, target='', graph=None, config=None):
    """Creates a new TensorFlow session.

    If no `graph` argument is specified when constructing the session,
    the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
    process, you will have to use different sessions for each graph,
    but each graph can be used in multiple sessions. In this case, it
    is often clearer to pass the graph to be launched explicitly to
    the session constructor.

    Args:
      target: (Optional.) The execution engine to connect to.
        Defaults to using an in-process engine. See
        [Distributed TensorFlow](https://www.tensorflow.org/how_tos/distributed/index.html)
        for more examples.
      graph: (Optional.) The `Graph` to be launched (described above).
      config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
        protocol buffer with configuration options for the session.

    """
    super(Session, self).__init__(target, graph, config=config)
    # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
    self._default_graph_context_manager = None
    self._default_session_context_manager = None 
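
The "one session per graph" point above is easy to miss; here is a hedged sketch (TF 1.x assumed) of the explicit-graph pattern the docstring recommends.

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops

g1, g2 = ops.Graph(), ops.Graph()
with g1.as_default():
  a = constant_op.constant(1, name='a')
with g2.as_default():
  b = constant_op.constant(2, name='b')

# One session per graph; passing the graph explicitly avoids depending
# on whichever graph happens to be the default at construction time.
with session.Session(graph=g1) as sess:
  print(sess.run(a))  # -> 1
with session.Session(graph=g2) as sess:
  print(sess.run(b))  # -> 2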
Example #11
Source File: util.py    From lambda-packs with MIT License
def get_tensors(graph):
  """get all the tensors which are input or output of an op in the graph.

  Args:
    graph: a `tf.Graph`.
  Returns:
    A list of `tf.Tensor`.
  Raises:
    TypeError: if graph is not a `tf.Graph`.
  """
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError("Expected a graph, got: {}".format(type(graph)))
  ts = []
  for op in graph.get_operations():
    ts += op.outputs
  return ts 
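
A hedged usage sketch, again assuming the TF 1.x tf.contrib.graph_editor re-export:

import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

g = tf.Graph()
with g.as_default():
  a = tf.constant(1.0, name='a')
  b = tf.add(a, a, name='b')

# Tensors come back in op-creation order.
print([t.name for t in ge.get_tensors(g)])  # -> ['a:0', 'b:0']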
Example #12
Source File: saved_model_test.py    From auto-alt-text-lambda-api with MIT License
def testClearDevices(self):
    export_dir = os.path.join(test.get_temp_dir(), "test_clear_devices")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Specify a device and save a variable.
    ops.reset_default_graph()
    with session.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.TRAINING], clear_devices=True)

    # Save the SavedModel to disk.
    builder.save()

    # Restore the graph with a single predefined tag whose variables were saved
    # without any device information.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.TRAINING], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval()) 
Example #13
Source File: util.py    From lambda-packs with MIT License
def __init__(self, graph):
    """Create a dictionary of control-output dependencies.

    Args:
      graph: a `tf.Graph`.
    Returns:
      A dictionary where a key is a `tf.Operation` instance and the
         corresponding value is a list of all the ops which have the key
         as one of their control-input dependencies.
    Raises:
      TypeError: if graph is not a `tf.Graph`.
    """
    if not isinstance(graph, tf_ops.Graph):
      raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
    self._control_outputs = {}
    self._graph = graph
    self._version = None
    self._build() 
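
In the vendored source this is graph_editor's ControlOutputs class; the sketch below shows how the resulting mapping might be queried, assuming the TF 1.x module layout (tensorflow.contrib.graph_editor.util).

import tensorflow as tf
from tensorflow.contrib.graph_editor import util

g = tf.Graph()
with g.as_default():
  a = tf.constant(1.0, name='a')
  with g.control_dependencies([a]):
    b = tf.constant(2.0, name='b')

# Invert the control-input relation: which ops name `a` as a control input?
co = util.ControlOutputs(g)
print([op.name for op in co.get(a.op)])  # -> ['b']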
Example #14
Source File: subgraph.py    From lambda-packs with MIT License
def __copy__(self):
    """Create a copy of this subgraph.

    Note that this class is a "view": copying it only creates another view and
    does not copy the underlying part of the `tf.Graph`.

    Returns:
      A new identical instance of the original subgraph view.
    """
    cls = self.__class__
    result = cls.__new__(cls)
    for k, v in iteritems(self.__dict__):
      if k == "_graph":
        setattr(result, k, v)
      else:
        setattr(result, k, list(v))  # copy the list
    return result 
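
A short hedged illustration of the view semantics (hypothetical usage via the public ge.sgv helper):

import copy
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

with tf.Graph().as_default():
  c = tf.add(tf.constant(1.0), tf.constant(2.0), name='c')
  sgv = ge.sgv(c.op)
  sgv_copy = copy.copy(sgv)

# Both views share the same underlying tf.Graph; only the view's own
# lists of ops, inputs and outputs were duplicated.
assert sgv_copy.graph is sgv.graph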
Example #15
Source File: saved_model_test.py    From auto-alt-text-lambda-api with MIT License
def testNoOverwrite(self):
    export_dir = os.path.join(test.get_temp_dir(), "test_no_overwrite")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, ["foo"])

    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)

    # Restore the graph with tag "foo", whose variables were saved.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())

    # An attempt to create another builder with the same export directory should
    # result in an assertion error.
    self.assertRaises(AssertionError, saved_model_builder.SavedModelBuilder,
                      export_dir) 
Example #16
Source File: subgraph.py    From lambda-packs with MIT License
def remap_inputs(self, new_input_indices):
    """Remap the inputs of the subgraph.

    If the inputs of the original subgraph are [t0, t1, t2], remapping to [2,0]
    will create a new instance whose inputs is [t2, t0].

    Note that this is only modifying the view: the underlying `tf.Graph` is not
    affected.

    Args:
      new_input_indices: an iterable of integers or tf.Tensors
        representing a mapping between the old inputs and the new ones.
        Integers must be non-negative and smaller than the number of old inputs.
        tf.Tensors must belong to the old list of inputs.
        This mapping can be under-complete and must be without repetitions.
    Returns:
      A new modified instance of the original subgraph view with remapped
        inputs.
    """
    res = self.copy()
    res._remap_inputs(new_input_indices)  # pylint: disable=protected-access
    return res 
Example #17
Source File: subgraph.py    From lambda-packs with MIT License
def remap_outputs(self, new_output_indices):
    """Remap the output of the subgraph.

    If the output of the original subgraph are [t0, t1, t2], remapping to
    [1,1,0] will create a new instance whose outputs is [t1, t1, t0].

    Note that this is only modifying the view: the underlying tf.Graph is not
    affected.

    Args:
      new_output_indices: an iterable of integers or tf.Tensors
        representing a mapping between the old outputs and the new ones.
        Integers must be non-negative and smaller than the number of old outputs.
        tf.Tensors must belong to the old list of outputs.
        This mapping can be under-complete and can have repetitions.
    Returns:
      A new modified instance of the original subgraph view with remapped
        outputs.
    """
    res = copy.copy(self)
    res._remap_outputs(new_output_indices)  # pylint: disable=protected-access
    return res 
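
Both remap methods modify only the view, never the graph; a combined hedged sketch (hypothetical usage via ge.sgv):

import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

with tf.Graph().as_default():
  a = tf.constant(1.0, name='a')
  b = tf.constant(2.0, name='b')
  c = tf.add(a, b, name='c')

  # View over the single add op: inputs [a, b], outputs [c].
  sgv = ge.sgv(c.op)
  # Swap the two inputs in the view; no repetition is allowed here.
  swapped = sgv.remap_inputs([1, 0])
  print([t.name for t in swapped.inputs])   # -> ['b:0', 'a:0']
  # Outputs may be repeated, so the single output can be duplicated.
  doubled = sgv.remap_outputs([0, 0])
  print([t.name for t in doubled.outputs])  # -> ['c:0', 'c:0']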
Example #18
Source File: saved_model_test.py    From auto-alt-text-lambda-api with MIT License
def testSequence(self):
    export_dir = os.path.join(test.get_temp_dir(), "test_sequence")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Expect an assertion error since add_meta_graph_and_variables() should be
    # invoked before any add_meta_graph() calls.
    with self.test_session(graph=ops.Graph()) as sess:
      self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])

    # Expect an assertion error for multiple calls of
    # add_meta_graph_and_variables() since weights should be saved exactly once.
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, ["bar"])
      self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
                        sess, ["baz"]) 
Example #19
Source File: subgraph.py    From lambda-packs with MIT License
def _check_graph(sgv, graph):
  """Check if sgv belongs to the given graph.

  Args:
    sgv: a SubGraphView.
    graph: a graph or None.
  Returns:
    The SubGraphView sgv.
  Raises:
    TypeError: if sgv is not a SubGraphView or if graph is not None and not
      a tf.Graph.
    ValueError: if the graph of sgv and the given graph are not None and
      different.
  """
  if not isinstance(sgv, SubGraphView):
    raise TypeError("Expected a SubGraphView, got: {}".format(type(graph)))
  if graph is None or not sgv.graph:
    return sgv
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
  if sgv.graph is not graph:
    raise ValueError("Graph mismatch.")
  return sgv 
Example #20
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def test_diag_part(self):
    self._skip_if_tests_to_skip_contains("diag_part")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_diag_part = operator.diag_part()
            mat_diag_part = array_ops.matrix_diag_part(mat)

            if not use_placeholder:
              self.assertAllEqual(
                  mat_diag_part.get_shape(), op_diag_part.get_shape())

            op_diag_part_, mat_diag_part_ = sess.run(
                [op_diag_part, mat_diag_part], feed_dict=feed_dict)

            self.assertAC(op_diag_part_, mat_diag_part_) 
Example #21
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def test_add_to_tensor(self):
    self._skip_if_tests_to_skip_contains("add_to_tensor")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_plus_2mat = operator.add_to_tensor(2 * mat)

            if not use_placeholder:
              self.assertAllEqual(shape, op_plus_2mat.get_shape())

            op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat],
                                             feed_dict=feed_dict)

            self.assertAC(op_plus_2mat_v, 3 * mat_v) 
Example #22
Source File: estimator.py    From lambda-packs with MIT License
def infer_real_valued_columns_from_input_fn(input_fn):
  """Creates `FeatureColumn` objects for inputs defined by `input_fn`.

  This interprets all inputs as dense, fixed-length float values. This creates
  a local graph in which it calls `input_fn` to build the tensors, then discards
  it.

  Args:
    input_fn: Input function returning a tuple of:
        features - `Tensor` or dictionary of string feature name to `Tensor`.
        labels - `Tensor` of label values.

  Returns:
    List of `FeatureColumn` objects.
  """
  with ops.Graph().as_default():
    features, _ = input_fn()
    return layers.infer_real_valued_columns(features) 
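
A hedged usage sketch; in TF 1.x the same wrapper was exposed as tf.contrib.learn.infer_real_valued_columns_from_input_fn, and the feature names below are made up for illustration.

import tensorflow as tf
from tensorflow.contrib import learn

def input_fn():
  # Hypothetical input_fn: two dense float features plus a label tensor.
  features = {
      'age': tf.constant([[25.0], [40.0]]),
      'income': tf.constant([[50.0], [80.0]]),
  }
  labels = tf.constant([[0.0], [1.0]])
  return features, labels

columns = learn.infer_real_valued_columns_from_input_fn(input_fn)
print([c.name for c in columns])  # e.g. ['age', 'income']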
Example #23
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def test_log_abs_det(self):
    self._skip_if_tests_to_skip_contains("log_abs_det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_log_abs_det = operator.log_abs_determinant()
            mat_log_abs_det = math_ops.log(
                math_ops.abs(linalg_ops.matrix_determinant(mat)))
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
            op_log_abs_det_v, mat_log_abs_det_v = sess.run(
                [op_log_abs_det, mat_log_abs_det],
                feed_dict=feed_dict)
            self.assertAC(op_log_abs_det_v, mat_log_abs_det_v) 
Example #24
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def test_det(self):
    self._skip_if_tests_to_skip_contains("det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_det = operator.determinant()
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_det.get_shape())
            op_det_v, mat_det_v = sess.run(
                [op_det, linalg_ops.matrix_determinant(mat)],
                feed_dict=feed_dict)
            self.assertAC(op_det_v, mat_det_v) 
Example #25
Source File: saved_model_test.py    From auto-alt-text-lambda-api with MIT License
def testLegacyInitOp(self):
    export_dir = os.path.join(test.get_temp_dir(), "test_legacy_init_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.test_session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.Variable(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.Variable(2, name="v2")
      ops.add_to_collection("v", v2)

      # Initialize another variable `v3` to 42.
      v3 = variables.Variable(42, name="v3", trainable=False, collections=[])
      ops.add_to_collection("v", v3)

      # Set up an assignment op to be run as part of the legacy_init_op.
      assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
      legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")

      sess.run(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(
          sess, ["foo"], legacy_init_op=legacy_init_op)

    # Save the SavedModel to disk.
    builder.save()

    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())
      # Evaluates to the sum of the first two variables and assigned as part of
      # the legacy_init_op, following a restore.
      self.assertEqual(3, ops.get_collection("v")[2].eval()) 
Example #26
Source File: saved_model_test.py    From auto-alt-text-lambda-api with MIT License
def _validate_inputs_tensor_info(self, builder, tensor_info):
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)

      foo_signature = signature_def_utils.build_signature_def({
          "foo_inputs": tensor_info
      }, dict(), "foo")
      self.assertRaises(
          AssertionError,
          builder.add_meta_graph_and_variables,
          sess, ["foo"],
          signature_def_map={"foo_key": foo_signature}) 
Example #27
Source File: prefetch_queue_test.py    From auto-alt-text-lambda-api with MIT License
def testDictConstruction(self):
    with ops.Graph().as_default():
      batches = {
          'first': constant_op.constant([1]),
          'second': constant_op.constant([2.0, 2.1])
      }
      prefetcher = prefetch_queue.prefetch_queue(batches)
      dequeued = prefetcher.dequeue()
      self.assertTrue(isinstance(dequeued, dict))
      self.assertEqual(2, len(dequeued))
      self.assertEqual(dtypes.int32, dequeued['first'].dtype)
      self.assertEqual(dtypes.float32, dequeued['second'].dtype) 
Example #28
Source File: linear_operator_test_util.py    From lambda-packs with MIT License
def test_solve(self):
    self._skip_if_tests_to_skip_contains("solve")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          for adjoint in False, True:
            for adjoint_arg in False, True:
              with self.test_session(graph=ops.Graph()) as sess:
                sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                    shape, dtype, use_placeholder=use_placeholder)
                rhs = self._make_rhs(operator, adjoint=adjoint)
                # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
                if adjoint_arg:
                  op_solve = operator.solve(
                      linear_operator_util.matrix_adjoint(rhs),
                      adjoint=adjoint, adjoint_arg=adjoint_arg)
                else:
                  op_solve = operator.solve(
                      rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
                mat_solve = linalg_ops.matrix_solve(mat, rhs, adjoint=adjoint)
                if not use_placeholder:
                  self.assertAllEqual(
                      op_solve.get_shape(), mat_solve.get_shape())
                op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve],
                                                   feed_dict=feed_dict)
                self.assertAC(op_solve_v, mat_solve_v) 
Example #29
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testUseUpdateOps(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      expected_mean = np.mean(self._inputs, axis=(0))
      expected_var = np.var(self._inputs, axis=(0))

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib2.get_variables_by_name('moving_variance')[
          0]

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())
        mean, variance = sess.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          sess.run([train_op])
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # After 10 updates with decay 0.1 moving_mean == expected_mean and
        # moving_variance == expected_var.
        self.assertAllClose(mean, expected_mean)
        self.assertAllClose(variance, expected_var) 
Example #30
Source File: allreduce_test.py    From benchmarks with Apache License 2.0
def _do_pack_unpack_test(self, tt):
    """Do a single pack-unpack test.

    Args:
      tt: A _test_tuple defining the parameters of the test to do.

    This test executes a graph that performs a pack of tower_grads
    followed by an unpack and verifies that the shapes and values
    of gradient tensors are unchanged, along with paired variables.
    """
    with ops.Graph().as_default():
      tower_grads, consts, _, vrbls = self._init_tensors(
          tt.num_devices, tt.in_shapes)
      packed_tg, packing = allreduce.pack_small_tensors(
          tower_grads, max_bytes=40, max_group=10)
      unpacked_tg = allreduce.unpack_small_tensors(packed_tg, packing)
      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        packed = sess.run(packed_tg)
        for d in range(0, tt.num_devices):
          for t in range(0, len(tt.out_shapes)):
            num_elts = 0
            for dim in tt.out_shapes[t]:
              num_elts = (num_elts or 1) * dim
            self.assertTrue(np.array_equal(
                np.array(range(tt.out_i[t], tt.out_i[t] + num_elts),
                         dtype=np.float32).reshape(tt.out_shapes[t]),
                packed[d][t][0]))
        unpacked = sess.run(unpacked_tg)
        for d in range(0, tt.num_devices):
          for t in range(0, len(tt.in_shapes)):
            self.assertTrue(np.array_equal(consts[d][t], unpacked[d][t][0]))
            self.assertEqual(vrbls[d][t], unpacked_tg[d][t][1])