Python tensorflow.atanh() Examples

The following are 20 code examples of tensorflow.atanh(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
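As a quick orientation before the examples: tf.atanh computes the element-wise inverse hyperbolic tangent, which is finite only on the open interval (-1, 1). A minimal sketch, assuming TensorFlow 1.x (the API style the examples below use):

import tensorflow as tf

x = tf.constant([-0.5, 0.0, 0.5])
y = tf.atanh(x)         # approximately [-0.5493, 0.0, 0.5493]
roundtrip = tf.tanh(y)  # recovers x up to floating-point error

with tf.Session() as sess:
    print(sess.run([y, roundtrip]))

Inputs at exactly ±1 yield ±inf and inputs outside [-1, 1] yield nan, which is why several of the examples below clip or clamp their arguments before calling tf.atanh.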
Example #1
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_unary():
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ['in_data:0'], out.name)

    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p) 
Example #2
Source File: squash_bijector.py    From mbpo with MIT License
def _inverse(self, y):
        return tf.atanh(y) 
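In this bijector, tf.atanh is the exact inverse of a tanh forward transform: tanh squashes an unbounded sample into (-1, 1), and _inverse maps it back. A minimal standalone sketch of the pair (an illustration, not the project's actual class):

import tensorflow as tf

def squash_forward(x):
    # map an unbounded value into (-1, 1)
    return tf.tanh(x)

def squash_inverse(y):
    # invert the squashing; y must lie strictly inside (-1, 1)
    return tf.atanh(y)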
Example #3
Source File: rommeo_ac.py    From mapr2 with Apache License 2.0
def _create_q_update(self):
        """Create a minimization operation for Q-function update."""
        opponent_actions, opponent_actions_log_pis = self.opponent_policy.actions_for(
            observations=self._next_observations_ph,
            reuse=tf.AUTO_REUSE, with_log_pis=True)
        assert_shape(opponent_actions, [None, self._opponent_action_dim])

        prior = self._get_opponent_prior(self._next_observations_ph)
        raw_actions = tf.atanh(opponent_actions)
        prior_log_pis = prior.dist.log_prob(raw_actions)
        prior_log_pis = prior_log_pis - squash_correction(raw_actions)

        actions, actions_log_pis = self.policy.actions_for(observations=self._next_observations_ph,
                                                           reuse=tf.AUTO_REUSE,
                                                           with_log_pis=True,
                                                           opponent_actions=opponent_actions)

        with tf.variable_scope('target_joint_q_agent_{}'.format(self._agent_id), reuse=tf.AUTO_REUSE):
            q_value_targets = self.target_joint_qf.output_for(
                observations=self._next_observations_ph,
                actions=actions,
                opponent_actions=opponent_actions)
            q_value_targets = q_value_targets - self._annealing_pl * actions_log_pis - opponent_actions_log_pis + prior_log_pis
            assert_shape(q_value_targets, [None])

        self._q_values = self.joint_qf.output_for(
            self._observations_ph, self._actions_pl, self._opponent_actions_pl, reuse=True)
        assert_shape(self._q_values, [None])

        ys = tf.stop_gradient(self._reward_scale * self._rewards_pl + (
            1 - self._terminals_pl) * self._discount * q_value_targets)
        assert_shape(ys, [None])

        bellman_residual = 0.5 * tf.reduce_mean((ys - self._q_values)**2)
        with tf.variable_scope('target_joint_qf_opt_agent_{}'.format(self._agent_id), reuse=tf.AUTO_REUSE):
            if self._train_qf:
                td_train_op = tf.train.AdamOptimizer(self._qf_lr).minimize(
                    loss=bellman_residual, var_list=self.joint_qf.get_params_internal())
                self._training_ops.append(td_train_op)

        self._bellman_residual = bellman_residual 
Example #4
Source File: rommeo_ac.py    From mapr2 with Apache License 2.0
def _create_opponent_p_update(self):
        opponent_actions, opponent_actions_log_pis, reg_loss = self.opponent_policy.actions_for(
            observations=self._observations_ph,
            reuse=tf.AUTO_REUSE, with_log_pis=True, return_reg=True)
        assert_shape(opponent_actions, [None, self._opponent_action_dim])

        prior = self._get_opponent_prior(self._observations_ph)
        raw_actions = tf.atanh(opponent_actions)
        prior_log_pis = prior.dist.log_prob(raw_actions)
        prior_log_pis = prior_log_pis - squash_correction(raw_actions)

        actions, agent_log_pis = self.policy.actions_for(observations=self._observations_ph,
                                                         reuse=tf.AUTO_REUSE,
                                                         with_log_pis=True,
                                                         opponent_actions=opponent_actions)

        q_values = self.joint_qf.output_for(
            self._observations_ph, actions, opponent_actions, reuse=True)

        opponent_p_loss = tf.reduce_mean(opponent_actions_log_pis) - tf.reduce_mean(prior_log_pis) - tf.reduce_mean(q_values) + self._annealing_pl * agent_log_pis
        opponent_p_loss = opponent_p_loss + reg_loss
        with tf.variable_scope('opponent_policy_opt_agent_{}'.format(self._agent_id), reuse=tf.AUTO_REUSE):
            if self._train_policy:
                optimizer = tf.train.AdamOptimizer(self._policy_lr)
                om_training_op = optimizer.minimize(
                    loss=opponent_p_loss,
                    var_list=self.opponent_policy.get_params_internal())
                self._training_ops.append(om_training_op) 
Example #5
Source File: rommeo_ac.py    From mapr2 with Apache License 2.0
def _create_opponent_prior_update(self):
        prior = self._get_opponent_prior(self._recent_opponent_observations_ph)
        raw_actions = tf.atanh(self._recent_opponent_actions_pl)
        log_pis = prior.dist.log_prob(raw_actions)
        log_pis = log_pis - squash_correction(raw_actions)
        loss = -tf.reduce_mean(log_pis) + prior.reg_loss_t
        vars = U.scope_vars(self._opponent_prior_scope)
        with tf.variable_scope('opponent_prior_opt_agent_{}'.format(self._agent_id), reuse=tf.AUTO_REUSE):
            if self._train_policy:
                optimizer = tf.train.AdamOptimizer(self._policy_lr)
                prior_training_op = optimizer.minimize(
                        loss=loss,
                        var_list=vars)
                self._training_ops.append(prior_training_op) 
Example #6
Source File: gaussian_policy.py    From mapr2 with Apache License 2.0
def log_pis_for(self, actions):
        raw_actions = actions
        if self._squash:
            raw_actions = tf.atanh(actions)
            log_pis = self._distribution.log_prob(raw_actions)
            log_pis = log_pis - self._squash_correction(raw_actions)
            return log_pis
        return self._distribution.log_prob(raw_actions) 
Example #7
Source File: gaussian_policy.py    From mapr2 with Apache License 2.0
def log_pis_for(self, actions):
        raw_actions = actions
        if self._squash:
            raw_actions = tf.atanh(actions)
            log_pis = self._distribution.log_prob(raw_actions)
            log_pis -= self._squash_correction(raw_actions)
            return log_pis
        return self._distribution.log_prob(raw_actions) 
Example #8
Source File: TFANN.py    From pythonml with MIT License
def _GetActvFn(name):
    '''
    Helper function for selecting an activation function
    name: The name of the activation function
    return: A handle for the tensorflow activation function
    '''
    return {'atanh': tf.atanh,          'elu': tf.nn.elu,
            'ident': tf.identity,       'sig': tf.sigmoid,
            'softplus': tf.nn.softplus, 'softsign': tf.nn.softsign,
            'relu': tf.nn.relu,         'relu6': tf.nn.relu6,
            'tanh': tf.tanh}.get(name)
Example #9
Source File: tanh_normal.py    From dreamer with Apache License 2.0
def _inverse(self, y):
    precision = 0.99999997
    clipped = tf.where(
        tf.less_equal(tf.abs(y), 1.),
        tf.clip_by_value(y, -precision, precision), y)
    # y = tf.stop_gradient(clipped) + y - tf.stop_gradient(y)
    return tf.atanh(clipped) 
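The tf.where/clip_by_value combination guards against atanh's singularities: atanh(±1) is infinite, so in-range values are pulled back to ±precision before inversion. A quick sketch of the failure mode this avoids:

import tensorflow as tf

y = tf.constant([0.5, 1.0, 1.5])
unsafe = tf.atanh(y)  # [0.5493..., inf, nan]
safe = tf.atanh(tf.clip_by_value(y, -0.99999997, 0.99999997))  # all finite

Note that the original clips only values with |y| <= 1, so genuinely out-of-range inputs still surface as nan instead of being silently pulled inside.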
Example #10
Source File: util.py    From hyperbolic_nn with Apache License 2.0
def tf_atanh(x):
    return tf.atanh(tf.minimum(x, 1. - EPS)) # Only works for positive real x.

# Real x, not vector! 
Example #11
Source File: common_layers_test.py    From fine-lm with MIT License
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))  # atanh(0) == 0, so coefficients are all zero
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    with self.test_session() as session:
      actual_loss_val, expected_loss_val = session.run(
          [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5) 
Example #12
Source File: squash_bijector.py    From malib with MIT License
def _inverse(self, y):
        return tf.atanh(y) 
Example #13
Source File: tf_utils.py    From malib with MIT License
def squash_correction(actions, squashed=True):
    if squashed:
        actions = tf.atanh(actions)
    return tf.reduce_sum(tf.math.log(1 - tf.tanh(actions) ** 2 + EPS), axis=1) 
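This is the change-of-variables correction for a tanh-squashed policy: if a = tanh(u), then log p(a) = log p(u) - sum(log(1 - tanh(u)^2)), and the small EPS constant keeps the log finite near |a| = 1. Since tanh(atanh(a)) == a, the summand reduces to log(1 - a^2 + EPS); a rough NumPy check, with an assumed EPS value:

import numpy as np

EPS = 1e-6  # assumed for illustration; tf_utils defines its own constant
a = np.tanh(np.array([[0.3, -1.2]]))
lhs = np.sum(np.log(1 - np.tanh(np.arctanh(a)) ** 2 + EPS), axis=1)
rhs = np.sum(np.log(1 - a ** 2 + EPS), axis=1)
assert np.allclose(lhs, rhs)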
Example #14
Source File: hyperbolic.py    From neural-structured-learning with Apache License 2.0
def artanh(x):
  eps = BALL_EPS[x.dtype]
  return tf.atanh(tf.minimum(tf.maximum(x, -1 + eps), 1 - eps)) 
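Clamping the argument into [-1 + eps, 1 - eps] keeps both the value and the gradient of atanh finite near the boundary of the Poincaré ball; BALL_EPS maps each dtype to an appropriate margin. An equivalent standalone sketch, with an assumed eps:

import tensorflow as tf

def artanh_clamped(x, eps=1e-7):  # eps value assumed for illustration
    return tf.atanh(tf.clip_by_value(x, -1.0 + eps, 1.0 - eps))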
Example #15
Source File: common_layers_test.py    From training_results_v0.5 with Apache License 2.0
def testSampleFromDiscretizedMixLogistic(self):
    batch = 2
    height = 4
    width = 4
    num_mixtures = 5
    seed = 42
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))  # atanh(0) == 0, so coefficients are all zero
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    locs_0 = locs[..., :3]
    expected_sample = tf.clip_by_value(locs_0, -1., 1.)

    actual_sample = common_layers.sample_from_discretized_mix_logistic(
        pred, seed=seed)
    actual_sample_val, expected_sample_val = self.evaluate(
        [actual_sample, expected_sample])
    # Use a low tolerance: samples numerically differ, as the actual
    # implementation clips log-scales so they always contribute to sampling.
    self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2) 
Example #16
Source File: common_layers_test.py    From training_results_v0.5 with Apache License 2.0
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))  # atanh(0) == 0, so coefficients are all zero
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5) 
Example #17
Source File: common_layers_test.py    From BERT with Apache License 2.0
def testSampleFromDiscretizedMixLogistic(self):
    batch = 2
    height = 4
    width = 4
    num_mixtures = 5
    seed = 42
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))  # atanh(0) == 0, so coefficients are all zero
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    locs_0 = locs[..., :3]
    expected_sample = tf.clip_by_value(locs_0, -1., 1.)

    actual_sample = common_layers.sample_from_discretized_mix_logistic(
        pred, seed=seed)
    actual_sample_val, expected_sample_val = self.evaluate(
        [actual_sample, expected_sample])
    # Use a low tolerance: samples numerically differ, as the actual
    # implementation clips log-scales so they always contribute to sampling.
    self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2) 
Example #18
Source File: common_layers_test.py    From BERT with Apache License 2.0
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))  # atanh(0) == 0, so coefficients are all zero
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5) 
Example #19
Source File: squashed_normal.py    From rlgraph with Apache License 2.0
def _graph_fn_unsquash(self, values):
        """
        Reverse operation of _graph_fn_squash (using arcus tanh, i.e. atanh).

        Args:
            values (DataOp): The values to unsquash.

        Returns:
            The unsquashed values.
        """
        if get_backend() == "tf":
            return tf.atanh((values - self.low) / (self.high - self.low) * 2.0 - 1.0)
        elif get_backend() == "tf":
            return torch.atanh((values - self.low) / (self.high - self.low) * 2.0 - 1.0) 
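The affine expression rescales values from [low, high] onto [-1, 1] before applying atanh, inverting a tanh squash that was stretched to the action bounds. A backend-free sketch of the same mapping:

import numpy as np

def unsquash(values, low, high):
    # rescale [low, high] to [-1, 1] (the endpoints map to ±inf under arctanh)
    return np.arctanh((values - low) / (high - low) * 2.0 - 1.0)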
Example #20
Source File: common_layers_test.py    From fine-lm with MIT License
def testSampleFromDiscretizedMixLogistic(self):
    batch = 2
    height = 4
    width = 4
    num_mixtures = 5
    seed = 42
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))  # atanh(0) == 0, so coefficients are all zero
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    locs_0 = locs[..., :3]
    expected_sample = tf.clip_by_value(locs_0, -1., 1.)

    actual_sample = common_layers.sample_from_discretized_mix_logistic(
        pred, seed=seed)
    with self.test_session() as session:
      actual_sample_val, expected_sample_val = session.run(
          [actual_sample, expected_sample])
    # Use a low tolerance: samples numerically differ, as the actual
    # implementation clips log-scales so they always contribute to sampling.
    self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2)