Python tensorflow.half() Examples

The following are 30 code examples of tensorflow.half(). tf.half is TensorFlow's 16-bit floating-point dtype and an alias for tf.float16; in the examples below it appears mostly in tests that iterate over several dtypes to exercise reduced-precision behavior. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
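Since tf.half is a dtype object rather than a callable op, the usual pattern is to pass it as the dtype argument of tensor and variable constructors, as the examples below do. A minimal standalone sketch (not taken from any of the listed projects) illustrating the alias:

import tensorflow as tf

# tf.half is an alias for tf.float16, TensorFlow's 16-bit float dtype.
assert tf.half == tf.float16
x = tf.constant([1.0, 2.0], dtype=tf.half)
print(x.dtype)  # <dtype: 'float16'>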
Example #1
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testWithGlobalStep(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        global_step = tf.Variable(0, trainable=False)
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]),
            global_step=global_step)
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params and global_step
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
        self.assertAllCloseAccordingToType(1, global_step.eval()) 
Example #2
Source File: adagrad_test.py    From deep_image_model with Apache License 2.0
def testTensorLearningRate(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        ada_opt = tf.train.AdagradOptimizer(
            tf.constant(3.0),
            initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of adagrad
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval()) 
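The hard-coded expected values above can be reproduced with a small numpy model of the Adagrad update. This is a sketch assuming the standard accumulator rule (accumulate squared gradients, scale the step by 1/sqrt(accumulator)), not code from the project:

import numpy as np

def adagrad_update_numpy(param, accum, g, lr=3.0):
  # Accumulate squared gradients, then take a step scaled by 1/sqrt(accum).
  accum = accum + g * g
  return param - lr * g / np.sqrt(accum), accum

var0, accum0 = np.array([1.0, 2.0]), np.array([0.1, 0.1])
for _ in range(3):
  var0, accum0 = adagrad_update_numpy(var0, accum0, np.array([0.1, 0.1]))
print(var0)  # ~[-1.60260987, -0.60260987], matching the assertion above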
Example #3
Source File: adagrad_test.py    From deep_image_model with Apache License 2.0
def doTestBasic(self, use_locking=False):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        ada_opt = tf.train.AdagradOptimizer(3.0,
                                            initial_accumulator_value=0.1,
                                            use_locking=use_locking)
        ada_update = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of adagrad
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval()) 
Example #4
Source File: optimizer_test.py    From deep_image_model with Apache License 2.0
def testPrecomputedGradient(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        grad_loss = tf.constant([42, -42], dtype=dtype)
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(cost,
                                 global_step, [var0, var1],
                                 grad_loss=grad_loss)

        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
        self.assertAllClose(
            [1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)], var0.eval())
        self.assertAllClose(
            [3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)], var1.eval()) 
Example #5
Source File: optimizer_test.py    From deep_image_model with Apache License 2.0
def testAggregationMethod(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(
            cost,
            global_step,
            [var0, var1],
            aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)

        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
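        # cost = 5 * var0 + 3 * var1, so the gradients are 5 and 3; with a
        # learning rate of 3.0 the updates are var0 - 15.0 and var1 - 9.0.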
        self.assertAllClose([-14., -13.], var0.eval())
        self.assertAllClose([-6., -5.], var1.eval()) 
Example #6
Source File: optimizer_test.py    From deep_image_model with Apache License 2.0
def testBasic(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        sgd_op = tf.train.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(cost, global_step, [var0, var1])

        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params
        self.assertAllClose([-14., -13.], var0.eval())
        self.assertAllClose([-6., -5.], var1.eval()) 
Example #7
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testTensorLearningRate(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        lrate = tf.constant(3.0)
        sgd_op = tf.train.GradientDescentOptimizer(lrate).apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval()) 
Example #8
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testEquivAdagradwithoutRegularization(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Adagrad learning rate
                                   learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype)

      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
            dtype)

      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3) 
Example #9
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testEquivSparseAdagradwithoutRegularization(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Adagrad learning rate
                                   learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype,
            is_sparse=True)

      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
            dtype, is_sparse=True)

      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3) 
Example #10
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testEquivSparseGradientDescentwithoutRegularization(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Fixed learning rate
                                   learning_rate_power=-0.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype,
            is_sparse=True)

      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.GradientDescentOptimizer(3.0), dtype, is_sparse=True)

      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3) 
Example #11
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testBasic(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval()) 
Example #12
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testEquivGradientDescentwithoutRegularization(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Fixed learning rate
                                   learning_rate_power=-0.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype)

      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.GradientDescentOptimizer(3.0), dtype)

      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3) 
Example #13
Source File: adam_test.py    From deep_image_model with Apache License 2.0
def testTensorLearningRate(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer(tf.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval()) 
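The helper adam_update_numpy is defined elsewhere in the same test file. A sketch of the standard Adam recurrence it implements (the default hyperparameters here are assumptions matching tf.train.AdamOptimizer's defaults):

def adam_update_numpy(param, g_t, t, m, v, alpha=0.001,
                      beta1=0.9, beta2=0.999, epsilon=1e-8):
  # Bias-corrected step size for step t (1-indexed).
  alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
  m_t = beta1 * m + (1 - beta1) * g_t        # first-moment estimate
  v_t = beta2 * v + (1 - beta2) * g_t * g_t  # second-moment estimate
  param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t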
Example #14
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testFtrlWithL1_L2(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([4.0, 3.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)

        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.001,
                                     l2_regularization_strength=2.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

        # Run 10 steps FTRL
        for _ in range(10):
          update.run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(np.array([-0.24059935, -0.46829352]),
                                           v0_val)
        self.assertAllCloseAccordingToType(np.array([-0.02406147, -0.04830509]),
                                           v1_val) 
Example #15
Source File: variable_clipping_optimizer_test.py    From deep_image_model with Apache License 2.0
def testDenseDistributed(self):
    worker, unused_ps = self._setupCluster()
    for dtype in [tf.float64, tf.half, tf.float32]:
      with tf.Session(worker.target):
        var0, var1, update_op = self._setupDense(True, dtype)
        self._assertDenseCorrect(var0, var1, update_op) 
Example #16
Source File: variable_clipping_optimizer_test.py    From deep_image_model with Apache License 2.0
def testSparseLocal(self):
    for dtype in [tf.float64, tf.float32, tf.half]:
      with self.test_session():
        var0, var1, update_op = self._setupSparse(False, dtype)
        self._assertSparseCorrect(var0, var1, update_op) 
Example #17
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testFtrlwithoutRegularization2(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([4.0, 3.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)

        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.0,
                                     l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

        # Run 3 steps FTRL
        for _ in range(3):
          update.run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(np.array([-2.55607247, -3.98729396]),
                                           v0_val)
        self.assertAllCloseAccordingToType(np.array([-0.28232238, -0.56096673]),
                                           v1_val) 
Example #18
Source File: ftrl_test.py    From deep_image_model with Apache License 2.0
def testFtrlwithoutRegularization(self):
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([0.0, 0.0], dtype=dtype)
        var1 = tf.Variable([0.0, 0.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)
        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.0,
                                     l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllClose([0.0, 0.0], v0_val)
        self.assertAllClose([0.0, 0.0], v1_val)

        # Run 3 steps FTRL
        for _ in range(3):
          update.run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(np.array([-2.60260963, -4.29698515]),
                                           v0_val)
        self.assertAllCloseAccordingToType(np.array([-0.28432083, -0.56694895]),
                                           v1_val) 
Example #19
Source File: adagrad_test.py    From deep_image_model with Apache License 2.0
def testSharing(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        ada_opt = tf.train.AdagradOptimizer(3.0)
        # Apply the optimizer twice.  Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        tf.global_variables_initializer().run()

        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval()) 
Example #20
Source File: adagrad_test.py    From deep_image_model with Apache License 2.0
def testSparseStability(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        shape = [1, 6]
        var0 = tf.Variable(
            [[0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
              -0.0105945]],
            dtype=dtype)
        grads0 = tf.IndexedSlices(
            tf.constant(
                [[-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
                  -8.4877e-05, -9.48906e-05]],
                shape=shape,
                dtype=dtype),
            tf.constant([0]),
            tf.constant(shape))
        ada_opt = tf.train.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        init = tf.global_variables_initializer()
        for _ in range(100):
          init.run()
          ada_update.run()
          self.assertAllCloseAccordingToType(
              np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
          self.assertAllCloseAccordingToType(
              np.array([[0.00891194, -0.10712013, 0.11047515, 0.22636929, -
                         0.0144573, -0.01029443]]), var0.eval()) 
Example #21
Source File: adagrad_test.py    From deep_image_model with Apache License 2.0
def testSparseBasic(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = tf.IndexedSlices(
            tf.constant([0.1], shape=[1, 1], dtype=dtype),
            tf.constant([0]),
            tf.constant([2, 1]))
        grads1 = tf.IndexedSlices(
            tf.constant([0.01], shape=[1, 1], dtype=dtype),
            tf.constant([1]),
            tf.constant([2, 1]))
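        # Each IndexedSlices is a (values, indices, dense_shape) triple: the
        # gradients above each touch only one row of a [2, 1] variable, so the
        # untouched row should keep its initial value after the updates below.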
        ada_opt = tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([[1.0], [2.0]], var0.eval())
        self.assertAllClose([[3.0], [4.0]], var1.eval())
        # Run 3 steps of adagrad
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([[-1.6026098728179932], [2.0]]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([[3.0], [3.715679168701172]]), var1.eval()) 
Example #22
Source File: variable_clipping_optimizer_test.py    From deep_image_model with Apache License 2.0
def testSparseDistributed(self):
    worker, unused_ps = self._setupCluster()
    for dtype in [tf.half, tf.float32, tf.float64]:
      with tf.Session(worker.target):
        var0, var1, update_op = self._setupSparse(True, dtype)
        self._assertSparseCorrect(var0, var1, update_op) 
Example #23
Source File: tpu_random.py    From compare_gan with Apache License 2.0
def uniform(shape, name=None):
  """Outputs pseudorandom random values from a uniform distribution.

  If the _RANDOM_OFFSET_TENSOR is set these output is deterministic based on the
  seed and the `name` of this operation. If `name` is None this will use the
  index in the graph instead.

  There is no `dtype` parameter since the underlying
  tf.contrib.stateless.stateless_random_uniform only supports tf.half,
  tf.float32 and tf.float64 and we do not care about tf.half and tf.float64.
  Patches welcome.

  Args:
    shape: A Tensor. Must be one of the following types: int32, int64.
        The shape of the output tensor.
    name: A name for the operation (optional).

  Returns:
    A Tensor.
  """
  if _RANDOM_OFFSET_TENSOR is None:
    logging.warning("No global random offset set, falling back to "
                    "non-deterministic pseudorandom numbers for operation %s.",
                    name)
    return tf.random.uniform(shape, name=name)
  return tf.contrib.stateless.stateless_random_uniform(
      shape=shape, seed=_get_seed(name), name=name) 
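For reference, a minimal sketch of calling the underlying stateless op directly (the seed values are arbitrary, and tf.contrib.stateless exists only in TF 1.x):

import tensorflow as tf

# The same (shape, seed) pair always yields the same values; fixing the seed
# via _get_seed(name) is what makes uniform() above deterministic.
seed = tf.constant([42, 7], dtype=tf.int32)
x = tf.contrib.stateless.stateless_random_uniform(shape=[2, 3], seed=seed)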
Example #24
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testSparseBasic(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = tf.IndexedSlices(
            tf.constant([0.1], shape=[1, 1], dtype=dtype),
            tf.constant([0]),
            tf.constant([2, 1]))
        grads1 = tf.IndexedSlices(
            tf.constant([0.01], shape=[1, 1], dtype=dtype),
            tf.constant([1]),
            tf.constant([2, 1]))
        sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
        self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [[1.0 - 3.0 * 0.1], [2.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [[3.0], [4.0 - 3.0 * 0.01]], var1.eval()) 
Example #25
Source File: equal.py    From onnx-tensorflow with Apache License 2.0
def args_check(cls, node, **kwargs):
    supported_dtype = [
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.int8,
        tf.int16, tf.int32, tf.int64, tf.complex64, tf.quint8, tf.qint8,
        tf.qint32, tf.string, tf.bool, tf.complex128
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    if x.dtype not in supported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT(
          "Equal inputs in " + str(x.dtype) + " which", "Tensorflow") 
Example #26
Source File: lamb_test.py    From addons with Apache License 2.0
def _dtypes_to_test(use_gpu):
    # Based on issue #347 (https://github.com/tensorflow/addons/issues/347)
    # tf.half is not registered for the 'ResourceScatterUpdate' OpKernel on
    # 'GPU', so we have to remove tf.half when testing on GPU.
    if use_gpu:
        return [tf.float32, tf.float64]
    else:
        return [tf.half, tf.float32, tf.float64] 
Example #27
Source File: conditional_gradient_test.py    From addons with Apache License 2.0
def _dtypes_to_test(use_gpu):
    # Based on issue #347 in the following link,
    #        "https://github.com/tensorflow/addons/issues/347"
    # tf.half is not registered for the 'ResourceScatterUpdate' OpKernel
    # on 'GPU' devices, so we have to remove tf.half when testing on GPU.
    # The function "_DtypesToTest" is from
    #       "https://github.com/tensorflow/tensorflow/blob/5d4a6cee737a1dc6c20172a1dc1
    #        5df10def2df72/tensorflow/python/kernel_tests/conv_ops_3d_test.py#L53-L62"
    if use_gpu:
        return [tf.float32, tf.float64]
    else:
        return [tf.half, tf.float32, tf.float64] 
Example #28
Source File: conditional_gradient_test.py    From addons with Apache License 2.0
def _dtypes_with_checking_system(use_gpu, system):
    # Based on issue #36764 in the following link,
    #        "https://github.com/tensorflow/tensorflow/issues/36764"
    # tf.half is not registered for the tf.linalg.svd function in the
    # Windows CPU build, so we have to remove tf.half when testing on
    # Windows CPU.
    if system == "Windows":
        return [tf.float32, tf.float64]
    else:
        return _dtypes_to_test(use_gpu) 
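A sketch of how a parameterized test might combine these helpers; platform.system() and tf.test.is_gpu_available() are standard calls, and the loop body is purely illustrative:

import platform
import tensorflow as tf

use_gpu = tf.test.is_gpu_available()
for dtype in _dtypes_with_checking_system(use_gpu, platform.system()):
    var = tf.Variable([1.0, 2.0], dtype=dtype)
    # ... run the optimizer under test with this dtype ...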
Example #29
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testBasicResourceVariable(self):
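    # Note: this snippet relies on test-file imports not shown here, e.g.
    # `from tensorflow.python.ops import resource_variable_ops` and
    # `from tensorflow.python.ops import resources`.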
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval()) 
Example #30
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testMinimizeResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        pred = tf.matmul(var0, x) + var1
        loss = pred*pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - np_grad], var1.eval())