Python tensorflow.compat.v1.add() Examples

The following are 30 code examples of tensorflow.compat.v1.add(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1.
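Before the project examples, here is a minimal standalone sketch of the op in TF1 graph mode (the tensor names and values are illustrative, not taken from any project below):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # needed when running under TF2

a = tf.constant([1, 2, 3], name="a")
b = tf.constant([10, 20, 30], name="b")
total = tf.add(a, b, name="total")  # element-wise addition, equivalent to a + b

with tf.Session() as sess:
    print(sess.run(total))  # [11 22 33]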
Example #1
Source File: preprocessor.py    From models with Apache License 2.0
def remap_labels(labels,
                 original_labels=None,
                 new_label=None):
  """Remaps labels that have an id in original_labels to new_label.

  Args:
    labels: rank 1 int32 tensor of shape [num_instance] containing the object
      classes.
    original_labels: int list of original labels that should be mapped from.
    new_label: int label to map to.
  Returns:
    Remapped labels
  """
  new_labels = labels
  for original_label in original_labels:
    change = tf.where(
        tf.equal(new_labels, original_label),
        tf.add(tf.zeros_like(new_labels), new_label - original_label),
        tf.zeros_like(new_labels))
    new_labels = tf.add(
        new_labels,
        change)
  new_labels = tf.reshape(new_labels, tf.shape(labels))
  return new_labels 
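A minimal usage sketch for remap_labels (the label values are illustrative; assumes tf is tensorflow.compat.v1 in graph mode):

labels = tf.constant([1, 2, 3, 2, 1], dtype=tf.int32)
# Map every label in {1, 2} to 5, leaving other labels untouched.
remapped = remap_labels(labels, original_labels=[1, 2], new_label=5)
with tf.Session() as sess:
    print(sess.run(remapped))  # [5 5 3 5 5]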
Example #2
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_BlacklistGroup(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      add = c1 + c2
      c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')

    # Verify c2 has a regularizer.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer)
    self.assertIsNotNone(manager.get_regularizer(c2.op))

    # Verify c2 has None regularizer after blacklisting c1 which is grouped.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer,
        regularizer_blacklist=['conv1'])
    self.assertIsNone(manager.get_regularizer(c2.op)) 
Example #3
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_cond_in_loop():
    graph = tf.Graph()
    with graph.as_default():
        def body(x):
            x = tf.constant(7)
            z = tf.constant(20)
            res = tf.cond(tf.less(x, 10), lambda: tf.add(
                10, 20), lambda: tf.square(10))
            return tf.multiply(res, x)

        x = tf.constant(21)
        def condition(x):
            return tf.less(x, 100)

        r = tf.while_loop(condition, body, loop_vars=[x])
        with tf.Session() as sess:
            tf_out = sess.run(r)

    check_equal(graph, tf_out) 
Example #4
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_AddConcat_AllOps(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      add = c1 + c2
      c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')
      out = tf.identity(c3)
      concat = tf.concat([c1, c2], axis=3)
      c4 = layers.conv2d(concat, num_outputs=10, kernel_size=3, scope='conv4')

    manager = orm.OpRegularizerManager(
        [out.op], self._default_op_handler_dict, SumGroupingRegularizer)

    # Op c4 is not in the DFS path of out.  Verify that OpRegularizerManager
    # does not process c4.
    self.assertNotIn(c4.op, manager.ops)
    self.assertNotIn(concat.op, manager.ops) 
Example #5
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_loop_in_cond():
    graph = tf.Graph()
    with graph.as_default():
        def fn1(a, b):
            i = tf.constant(0)

            def cd(i): return tf.less(i, 10)

            def bd(i): return tf.add(i, 1)
            res = tf.while_loop(cd, bd, [i])
            return tf.multiply(tf.add(20, res), 10)

        def fn2(a, b):
            return tf.add(10, 20)

        x = tf.constant(7)
        y = tf.constant(20)
        z = tf.constant(10)
        pred = tf.less(x, y)
        r = tf.cond(pred, lambda: fn1(x, y), lambda: fn2(y, z))

        with tf.Session() as sess:
            tf_out = sess.run(r, feed_dict={x: 1, y: 2, z: 3, pred: True})

    check_equal(graph, tf_out) 
Example #6
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_cond_fn_parameters():
    graph = tf.Graph()
    with graph.as_default():
        def fn1(x, y):
            return tf.multiply(5, 6)

        def fn2(x, y):
            return tf.add(3, 4)

        i = tf.constant(1)
        j = tf.constant(2)
        k = tf.constant(3)
        r = tf.cond(tf.less(i, j), lambda: fn1(i, k), lambda: fn2(j, k))

        with tf.Session() as sess:
            tf_out = sess.run(r, feed_dict={i: 1, j: 2, k: 3})

    check_equal(graph, tf_out) 
Example #7
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGetRegularizerForConcatWithNone(self, test_concat, depth):
    image = tf.constant(0.0, shape=[1, 17, 19, 3])
    conv2 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv2')
    other_input = tf.add(
        tf.identity(tf.constant(3.0, shape=[1, 17, 19, depth])), 3.0)
    # other_input has None as regularizer.
    concat = tf.concat([other_input, conv2], 3)
    output = tf.add(concat, concat, name='output_out')
    op = concat.op if test_concat else output.op

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['Conv2D'] = StubConvSourceOpHandler(add_concat_model_stub)
    op_reg_manager = orm.OpRegularizerManager([output.op], op_handler_dict)

    expected_alive = add_concat_model_stub.expected_alive()
    alive = op_reg_manager.get_regularizer(op).alive_vector
    self.assertAllEqual([True] * depth, alive[:depth])
    self.assertAllEqual(expected_alive['conv2'], alive[depth:]) 
Example #8
Source File: expert_utils.py    From tensor2tensor with Apache License 2.0
def add_scope(scope=None, scope_fn=None):
  """Return a decorator which add a TF name/variable scope to a function.

  Note that the function returned by the decorator accept an additional 'name'
  parameter, which can overwrite the name scope given when the function is
  created.

  Args:
    scope (str): name of the scope. If None, the function name is used.
    scope_fn (fct): Either tf.name_scope or tf.variable_scope

  Returns:
    fct: the add_scope decorator
  """
  def decorator(f):

    @functools.wraps(f)
    def decorated(*args, **kwargs):
      name = kwargs.pop("name", None)  # Python 2 hack for keyword only args
      with scope_fn(name or scope or f.__name__):
        return f(*args, **kwargs)

    return decorated

  return decorator 
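A minimal usage sketch for the decorator (the function name is hypothetical; TF1 graph mode assumed):

@add_scope(scope="my_block", scope_fn=tf.name_scope)
def double(x):
    return tf.add(x, x)

y = double(tf.constant(3))              # ops are created under the "my_block/" name scope
z = double(tf.constant(4), name="alt")  # the 'name' keyword overrides the default scope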
Example #9
Source File: transformer_memory.py    From tensor2tensor with Apache License 2.0
def post_attention(self, token, x):
    """Called after self-attention. The memory can be updated here.

    Args:
      token: Data returned by pre_attention, which can be used to carry over
        state related to the current memory operation.
      x: a Tensor of data after self-attention and feed-forward
    Returns:
      a (possibly modified) version of the input x
    """
    with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
      depth = common_layers.shape_list(x)[-1]
      actual_batch_size = common_layers.shape_list(x)[0]
      memory_output = tf.gather(token["retrieved_mem"],
                                tf.range(actual_batch_size))
      output = tf.add(tf.layers.dense(x, depth, use_bias=False),
                      tf.layers.dense(memory_output, depth))
      with tf.control_dependencies([output]):
        with tf.control_dependencies([
            self.write(token["x"], token["access_logits"])]):
          return tf.identity(output) 
Example #10
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_loop_2_vars():
    graph = tf.Graph()
    with graph.as_default():
        i0 = tf.constant(0)
        j0 = tf.ones([2, 2])

        def c(i, j): return i < 10

        def b(i, j): return [tf.add(i, 1), j]

        i1, i2 = tf.while_loop(c, b, loop_vars=[i0, j0])
        i1 += tf.constant(1337)

        with tf.Session() as sess:
            tf_out = sess.run(i1)

    check_equal(graph, tf_out) 
Example #11
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testCorrectSourceOpsWithSkipConnection(self):
    inputs = tf.zeros([2, 4, 4, 3])
    x0 = layers.conv2d(
        inputs, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv0')
    x1 = tf.nn.relu(layers.batch_norm(x0, scale=True, scope='bn0'))
    x1 = layers.conv2d(
        x1, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv1')
    x2 = tf.add_n([x0, x1], name='add')
    final_op = tf.nn.relu(layers.batch_norm(x2, scale=True, scope='bn1'))

    op_handler_dict = self._default_op_handler_dict
    op_reg_manager = orm.OpRegularizerManager([final_op.op], op_handler_dict)

    # All ops are in the same group
    group = list(op_reg_manager._op_group_dict.values())[0]
    source_op_names = [s.op.name for s in group.source_op_slices]
    self.assertSetEqual(set(['bn0/FusedBatchNormV3', 'bn1/FusedBatchNormV3']),
                        set(source_op_names)) 
Example #12
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_spop_function_invocation_no_autograph():
    with tf.Graph().as_default():

        @tf.function(autograph=False)
        def fun1(a):
            return tf.multiply(a,a)

        @tf.function(autograph=False)
        def fun2(b):
            return tf.multiply(b,10)

        @tf.function
        def fun3(x,y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x,y)
            return z

        t3 = fun3(tf.constant(10.5), tf.constant(20.4))

        compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) 
Example #13
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_spop_function_invocation_nested():
    with tf.Graph().as_default():
        t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        t2 = tf.placeholder(tf.int32, name="t2")
        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))

        @tf.function
        def myfunc(x, y):
            return tf.add(x, y, "myfunc")

        @tf.function
        def myfunc2(x, y):
            z = myfunc(x, y)
            l = myfunc(z, y)
            m = myfunc(l,z)
            return tf.add(l, m, "myfunc2")

        res1 = myfunc(t1, t2)
        res2 = myfunc2(res1, t1)

        compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [res2.name], mode='vm', init_global_variables=True) 
Example #14
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_spop_function_invocation_basic():
    with tf.Graph().as_default():

        def fun1(a):
            return tf.multiply(a,a)

        def fun2(b):
            return tf.multiply(b,10)

        @tf.function
        def fun3(x,y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x,y)
            return z

        t3 = fun3(tf.constant(10.5), tf.constant(20.4))

        compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) 
Example #15
Source File: tf_mittens.py    From mittens with Apache License 2.0
def _get_cost_function(self):
        """Compute the cost of the Mittens objective function.

        If self.mittens is 0, this is the same as the cost of GloVe.
        """
        self.weights = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.log_coincidence = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.diffs = tf.subtract(self.model, self.log_coincidence)
        cost = tf.reduce_sum(
            0.5 * tf.multiply(self.weights, tf.square(self.diffs)))
        if self.mittens > 0:
            self.mittens = tf.constant(self.mittens, tf.float32)
            cost += self.mittens * tf.reduce_sum(
                tf.multiply(
                    self.has_embedding,
                    self._tf_squared_euclidean(
                        tf.add(self.W, self.C),
                        self.original_embedding)))
        tf.summary.scalar("cost", cost)
        return cost 
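_tf_squared_euclidean is defined elsewhere in the mittens source; a plausible minimal definition consistent with its use above (an assumption, not the project's code) is:

    @staticmethod
    def _tf_squared_euclidean(X, Y):
        # Row-wise squared Euclidean distance between matrices X and Y.
        return tf.reduce_sum(tf.square(tf.subtract(X, Y)), axis=1)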
Example #16
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_spop_placeholder_without_shape_info():
    with tf.Graph().as_default():

        @function.Defun(*[tf.int32]*2)
        def Forward(x,y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b
        pl1 = tf.placeholder(tf.int32,name="pl1")
        pl2 = tf.placeholder(tf.int32,name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")
        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'],
                            ['StatefulPartitionedCall:0',z2.name],  mode='vm', init_global_variables=True) 
Example #17
Source File: nq_long_utils.py    From language with Apache License 2.0
def f1_metric(precision, precision_op, recall, recall_op):
  """Computes F1 based on precision and recall.

  Args:
    precision: <float> [batch_size]
    precision_op: Update op for precision.
    recall: <float> [batch_size]
    recall_op: Update op for recall.

  Returns:
    tensor and update op for F1.
  """
  f1_op = tf.group(precision_op, recall_op)
  numerator = 2 * tf.multiply(precision, recall)
  denominator = tf.add(precision, recall)
  f1 = tf.divide(numerator, denominator)

  # <float> [batch_size]
  zero_vec = tf.zeros_like(f1)
  is_valid = tf.greater(denominator, zero_vec)
  f1 = tf.where(is_valid, x=f1, y=zero_vec)

  return f1, f1_op 
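A sketch of wiring f1_metric to TF1 streaming metrics (tf.metrics.precision and tf.metrics.recall are standard TF1 APIs; the labels and predictions here are illustrative):

y_true = tf.constant([1, 0, 1, 1])
y_pred = tf.constant([1, 0, 0, 1])
precision, precision_op = tf.metrics.precision(labels=y_true, predictions=y_pred)
recall, recall_op = tf.metrics.recall(labels=y_true, predictions=y_pred)
f1, f1_op = f1_metric(precision, precision_op, recall, recall_op)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric accumulators are local variables
    sess.run(f1_op)                             # update precision and recall
    print(sess.run(f1))                         # 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8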
Example #18
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_placeholder():
    with tf.Graph().as_default():
        in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        var1 = tf.Variable(in_data1, name='in1')
        var2 = array_ops.placeholder_with_default(var1, None, name='place1')

        in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        place1 = array_ops.placeholder(
            shape=in_data1.shape, dtype=in_data1.dtype, name='in2')

        out1 = tf.math.add(var1, var2, name='out1')
        out2 = tf.math.add(out1, place1, name='out2')

        compare_tf_with_tvm([in_data1, in_data2], ['place1:0', 'in2:0'], 'out2:0',
                            init_global_variables=True)

#######################################################################
# OneHot
# ---------------------- 
Example #19
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_multi_input():
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.int32, shape=[3, 3], name='in1')
        in2 = tf.placeholder(tf.int32, shape=[3, 3], name='in2')
        in3 = tf.placeholder(tf.int32, shape=[3, 3], name='in3')
        in4 = tf.placeholder(tf.int32, shape=[3, 3], name='in4')

        out1 = tf.add(in1, in2, name='out1')
        out2 = tf.subtract(in3, in4, name='out2')
        out = tf.multiply(out1, out2, name='out')
        in_data = np.arange(9, dtype='int32').reshape([3, 3])

        compare_tf_with_tvm([in_data, in_data, in_data, in_data],
                            ['in1:0', 'in2:0', 'in3:0', 'in4:0'], 'out:0')

#######################################################################
# Multi Output to Graph
# --------------------- 
Example #20
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_quantize_dequantize(data):
    """ One iteration of quantize and dequantize """

    # Keras model to force TFLite converter to insert 2 TFLite quantize ops.
    # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize.
    # Second TFLite quantize op converts int8 tensor to int8 tensor - Qnn requantize.
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data_in, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)
    input_name = data_in.name.split(":")[0]

    # To create quantized values covering the dynamic range of activations, a representative dataset is needed.
    def representative_data_gen():
        for i in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen)

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_name)
    tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
                                rtol=1e-5, atol=1e-2) 
Example #21
Source File: deep_cnn.py    From privacy with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
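_variable_on_cpu is a helper defined elsewhere in the same source file. A typical way the collected decay terms are consumed (a standard TF1 pattern, not copied from the project):

kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                     stddev=5e-2, wd=0.004)
data_loss = tf.constant(1.0)  # stand-in for e.g. a cross-entropy term
total_loss = tf.add(data_loss, tf.add_n(tf.get_collection('losses')),
                    name='total_loss')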
Example #22
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_fill(dims, value_data, value_dtype):
    """ Use the fill op to create a tensor of value_data with constant dims."""

    value_data = np.array(value_data, dtype=value_dtype)
    # TF 1.13 TFLite convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
        with tf.Graph().as_default():
            value = array_ops.placeholder(dtype=value_dtype, name="value", shape=[])
            out = tf.fill(dims,  value)
            compare_tflite_with_tvm([value_data], ["value"], [value], [out])

    with tf.Graph().as_default():
        input1 = array_ops.placeholder(dtype=value_dtype, name="input1", shape=dims)
        # Fill op gets converted to static tensor during conversion
        out = tf.fill(dims,  value_data)
        out1 = tf.add(out, input1)
        input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)
        compare_tflite_with_tvm([input1_data], ["input1"], [input1], [out1]) 
Example #23
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_spop_function_invocation_defun():
    with tf.Graph().as_default():

        def fun1(a):
            return tf.multiply(a,a)

        def fun2(b):
            return tf.multiply(b,b)

        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x,y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x,y)
            return z

        op = gen_functional_ops.StatefulPartitionedCall(args=[tf.constant(10.5),tf.constant(20.4)],
                                                        Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")
        compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) 
Example #24
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_nested_loop_bound():
    graph = tf.Graph()
    with graph.as_default():
        dshape = (2, 10)
        dtype = "float32"
        dname = "data"
        np_data = np.random.uniform(size=dshape).astype(dtype)
        data = tf.placeholder(shape=dshape, dtype=dtype, name=dname)
        x = tf.slice(data, [1, 4], [1, 4])
        outer = x + 5.0
        def body(x, y):
            res = tf.cond(tf.less(y, 10), lambda: tf.add(
                10.0, 20.0), lambda: tf.square(10.0))
            def nested_body(nx, ny):
                return nx + 1, res + 2.0
            def nested_cond(nx, ny):
                return tf.less(nx, 15)
            nx = tf.constant(0)
            ny = tf.constant(0.0)
            nested_res = tf.while_loop(nested_cond, nested_body, loop_vars=[nx, ny])
            res = res + nested_res[1]
            z = tf.constant(7)
            res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10)
            return tf.multiply(res, x * outer), y + 1

        y = tf.constant(0)
        def condition(x, y):
            return tf.less(y, 20)

        r = tf.while_loop(condition, body, loop_vars=[x, y])
        with tf.Session() as sess:
            tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data})

    check_equal(graph, tf_out, {dname: np_data}) 
Example #25
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_reshape_symbolic(data, a_data, b_data):
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
        b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
        newshape = tf.add(a, b)
        out = array_ops.reshape(in_data, newshape)

        for mode in ["debug", "vm"]:
            compare_tf_with_tvm([data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode) 
Example #26
Source File: graph_compute_order_test.py    From receptive_field with Apache License 2.0
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    g: TensorFlow graph object (tf.Graph).
  """
  g = tf.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = tf.placeholder(tf.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = tf.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = tf.nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    tf.add(l5, l6, name='L7_add')

  return g 
Example #27
Source File: parse_layer_parameters_test.py    From receptive_field with Apache License 2.0
def create_test_network(image_resolution, convert_variables_to_constants):
  """Convolutional neural network for test.

  Args:
    image_resolution: Resolution to use for input placeholder. Used for height
      and width dimensions.
    convert_variables_to_constants: Whether to convert variables to constants.

  Returns:
    graph_def: GraphDef proto of the model.
  """
  g = tf.Graph()
  sess = tf.Session(graph=g)
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = tf.placeholder(
        tf.float32, (1, image_resolution, image_resolution, 1),
        name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = tf.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = tf.nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    tf.add(l5, l6, name='L7_add')

    if convert_variables_to_constants:
      sess.run(tf.global_variables_initializer())
      graph_def = tf.graph_util.convert_variables_to_constants(
          sess, g.as_graph_def(), ['L7_add'])
    else:
      graph_def = g.as_graph_def()

  return graph_def 
Example #28
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_fill_from_tensor(in_shape):
    """ Use the fill op to create a tensor of ones with non-constant shape.
        Some extra ops need to be added here to prevent the graph from
        being fully constant and folded away."""

    data = np.random.uniform(size=in_shape).astype('float32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype)

        x = tf.ones(shape=2*tf.shape(in_data), dtype=data.dtype)
        y = tf.math.add(in_data, tf.reduce_mean(x), name='out1')
        compare_tf_with_tvm(data, 'Placeholder:0', 'out1:0') 
Example #29
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_callnode_loop_vars():
    graph = tf.Graph()
    with graph.as_default():
        i = tf.add(tf.constant(0), 1)

        def c(i): return tf.less(i, 10)

        def b(i): return tf.add(i, 1)

        r = tf.while_loop(c, b, [i])

        with tf.Session() as sess:
            tf_out = sess.run(r)

        check_equal(graph, tf_out) 
Example #30
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """ One iteration of add """
    return _test_elemwise(math_ops.add, data, fused_activation_function, quantized, qnn_op)

#######################################################################
# Subtract
# --------