Python tensorflow.cosh() Examples

The following are 10 code examples of tensorflow.cosh(), TensorFlow's element-wise hyperbolic cosine. Each example is taken from an open-source project; the source file, project, and license are listed above it.
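As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of what tf.cosh computes, assuming TensorFlow 2.x with eager execution:

import numpy as np
import tensorflow as tf

# cosh(x) = (exp(x) + exp(-x)) / 2, applied element-wise
x = tf.constant([-1.0, 0.0, 1.0])
y = tf.cosh(x)
print(y.numpy())  # approx [1.5430806 1.        1.5430806]
assert np.allclose(y.numpy(), np.cosh(x.numpy()))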
Example #1
Source File: rnn_controller.py    From Searching-for-activation-functions with MIT License
def __init__(self, config):
        self.config = config
        self.n_steps = 10
        self.n_input, self.n_hidden = 4, 2
        # TF 1.x APIs (tf.contrib, tf.random_normal); with state_is_tuple=False the
        # LSTM state is the concatenated [c, h], hence shape [1, 2 * n_hidden] = [1, 4]
        self.state = tf.Variable(tf.random_normal(shape=[1, 4]))
        self.lstm = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=False)
        self.Wc, self.bc = self.init_controller_vars()
        self.Wv, self.bv = self.init_value_vars()

        # Other functions used in the paper
        # self.full_list_unary = {1:lambda x:x ,2:lambda x: -x, 3: tf.abs, 4:lambda x : tf.pow(x,2),5:lambda x : tf.pow(x,3),
        #   6:tf.sqrt,7:lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x,
        #   8:lambda x : x + tf.Variable(tf.truncated_normal([1], stddev=0.08)),9:lambda x: tf.log(tf.abs(x)+10e-8),
        #   10:tf.exp,11:tf.sin,12:tf.sinh,13:tf.cosh,14:tf.tanh,15:tf.asinh,16:tf.atan,17:lambda x: tf.sin(x)/x,
        #   18:lambda x : tf.maximum(x,0),19:lambda x : tf.minimum(x,0),20:tf.sigmoid,21:lambda x:tf.log(1+tf.exp(x)),
        #   22:lambda x:tf.exp(-tf.pow(x,2)),23:tf.erf,24:lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))}
        #
        # self.full_list_binary = {1:lambda x,y: x+y,2:lambda x,y:x*y,3:lambda x,y:x-y,4:lambda x,y:x/(y+10e-8),
        # 5:lambda x,y:tf.maximum(x,y),6:lambda x,y: tf.sigmoid(x)*y,7:lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.pow(x-y,2)),
        # 8:lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.abs(x-y)),
        # 9:lambda x,y: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x + (1-tf.Variable(tf.truncated_normal([1], stddev=0.08)))*y}
        #
        # self.unary = {1:lambda x:x ,2:lambda x: -x, 3: lambda x: tf.maximum(x,0), 4:lambda x : tf.pow(x,2),5:tf.tanh}
        # binary = {1:lambda x,y: x+y,2:lambda x,y:x*y,3:lambda x,y:x-y,4:lambda x,y:tf.maximum(x,y),5:lambda x,y: tf.sigmoid(x)*y}
        # inputs = {1:lambda x:x , 2:lambda x:0, 3: lambda x:3.14159265,4: lambda x : 1, 5: lambda x: 1.61803399} 
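The commented-out dictionaries enumerate the unary/binary search space from the paper (tf.cosh is unary op 13 in the full list). As a hedged illustration of how that space yields candidate activations, the sketch below (not from the repo) composes Swish, x * sigmoid(x), from unary op 1 (identity) and binary op 6 (sigmoid(x) * y):

import tensorflow as tf

unary = {1: lambda x: x, 13: tf.cosh}
binary = {6: lambda x, y: tf.sigmoid(x) * y}

def candidate(u1, u2, b, x):
    # core unit of the search: binary(unary(x), unary(x))
    return binary[b](unary[u1](x), unary[u2](x))

x = tf.constant([-2.0, 0.0, 2.0])
swish = candidate(1, 1, 6, x)  # sigmoid(x) * x, i.e. Swish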
Example #2
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_unary():
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ['in_data:0'], out.name)

    # ops with restricted domains are sampled from [-1, 1]
    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p) 
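The helper's default range of [1, 5] keeps every remaining op well-defined (acosh needs inputs >= 1, log needs positive inputs), while the restricted-domain ops above are sampled from [-1, 1]. A rough standalone equivalent of the tf.cosh case, without the TVM comparison, might look like this (a sketch assuming the tf.compat.v1 shim for the 1.x graph API):

import numpy as np
import tensorflow.compat.v1 as tf  # run the 1.x-style graph API under TF 2.x

tf.disable_v2_behavior()

np_data = np.random.uniform(1, 5, size=(2, 3, 5)).astype(np.float32)
with tf.Graph().as_default():
    in_data = tf.placeholder(np.float32, (2, 3, 5), name="in_data")
    out = tf.cosh(in_data)
    with tf.Session() as sess:
        tf_out = sess.run(out, feed_dict={in_data: np_data})
assert np.allclose(tf_out, np.cosh(np_data))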
Example #3
Source File: functions.py    From tangent with Apache License 2.0
def numpy_cosh(a):
  return np.cosh(a) 
Example #4
Source File: functions.py    From tangent with Apache License 2.0
def tfe_cosh(t):
  return tf.cosh(t) 
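tangent generates derivatives by rewriting source code, so registering numpy_cosh/tfe_cosh tells it which primitive a np.cosh or tf.cosh call maps to. Since d/dx cosh(x) = sinh(x), a hedged end-to-end check (assuming the tangent package is installed) is:

import numpy as np
import tangent

def f(a):
    return np.cosh(a)

df = tangent.grad(f)  # source-to-source derivative of f
x = 0.7
assert np.isclose(df(x), np.sinh(x))  # d/dx cosh(x) = sinh(x)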
Example #5
Source File: tf_extensions.py    From tangent with Apache License 2.0
def dtfsinh(y, x):
  d[x] = d[y] * tf.cosh(x) 
Example #6
Source File: tf_extensions.py    From tangent with Apache License 2.0
def ttftanh(y, x):
  cx = tf.cosh(x)
  d[y] = d[x] / (cx * cx) 
Example #7
Source File: tf_extensions.py    From tangent with Apache License 2.0
def ttfsinh(y, x):
  d[y] = d[x] * tf.cosh(x) 
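These three are tangent's derivative templates rather than standalone functions: d[y] and d[x] are placeholders that tangent splices into generated code. They encode d/dx sinh(x) = cosh(x) (reverse-mode dtfsinh and forward-mode ttfsinh) and, for ttftanh, d/dx tanh(x) = 1 / cosh^2(x). The latter identity is easy to sanity-check directly in TensorFlow 2.x:

import tensorflow as tf

x = tf.constant(0.3)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.tanh(x)
grad = tape.gradient(y, x)
tf.debugging.assert_near(grad, 1.0 / tf.cosh(x) ** 2)  # sech^2(x)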
Example #8
Source File: quantizers.py    From larq with Apache License 2.0
def swish_sign(x: tf.Tensor, beta: float = 5.0) -> tf.Tensor:
    @tf.custom_gradient
    def _call(x):
        def grad(dy):
            b_x = beta * x
            return dy * beta * (2 - b_x * tf.tanh(b_x * 0.5)) / (1 + tf.cosh(b_x))

        return math.sign(x), grad

    return _call(x) 
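The forward pass is a hard sign (the math module here is larq's own, which defines sign(0) = 1), while the custom gradient is the derivative of the smooth SignSwish surrogate, beta * (2 - beta*x * tanh(beta*x / 2)) / (1 + cosh(beta*x)); tf.cosh keeps that expression numerically stable rather than expanding it into exponentials. A hedged usage sketch, assuming this function is importable as larq.quantizers.swish_sign in this version of larq:

import tensorflow as tf
from larq.quantizers import swish_sign  # assumed import path

x = tf.constant([-0.4, 0.0, 0.4])
with tf.GradientTape() as tape:
    tape.watch(x)
    q = swish_sign(x)
print(q.numpy())                    # binary outputs in {-1, +1}
print(tape.gradient(q, x).numpy())  # smooth surrogate gradient, nonzero near 0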
Example #9
Source File: ops.py    From strawberryfields with Apache License 2.0
def squeezed_vacuum_vector(r, theta, cutoff, batched=False, eps=1e-32):
    """returns the ket representing a single mode squeezed vacuum state"""
    if batched:
        batch_size = r.shape[0]
    r = tf.cast(r, def_type)
    theta = tf.cast(theta, def_type)
    c1 = tf.cast(
        tf.stack(
            [
                tf.sqrt(1 / tf.cosh(r)) * np.sqrt(factorial(k)) / factorial(k / 2.0)
                for k in range(0, cutoff, 2)
            ],
            axis=-1,
        ),
        def_type,
    )
    c2 = tf.stack(
        [
            # eps guards the r == 0 case (0 ** 0 would otherwise appear in the k == 0 term)
            (-0.5 * tf.exp(1j * theta) * tf.cast(tf.tanh(r + eps), def_type)) ** (k / 2.0)
            for k in range(0, cutoff, 2)
        ],
        axis=-1,
    )
    even_coeffs = c1 * c2
    ind = [(k,) for k in np.arange(0, cutoff, 2)]
    shape = [cutoff]
    if batched:
        ind = batchify_indices(ind, batch_size)
        shape = [batch_size] + shape
    output = tf.scatter_nd(ind, tf.reshape(even_coeffs, [-1]), shape)
    return output 
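For reference, the even Fock amplitudes assembled from c1 and c2 are those of the standard squeezed-vacuum expansion, with tf.cosh supplying the overall normalization:

$$c_k = \frac{1}{\sqrt{\cosh r}}\,\frac{\sqrt{k!}}{(k/2)!}\left(-\frac{e^{i\theta}\tanh r}{2}\right)^{k/2},\qquad k = 0, 2, 4, \ldots$$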
Example #10
Source File: ops.py    From strawberryfields with Apache License 2.0
def displaced_squeezed(r_d, phi_d, r_s, phi_s, cutoff, pure=True, batched=False, eps=1e-12):
    """creates a single mode input displaced squeezed state"""
    alpha = tf.cast(r_d, def_type) * tf.exp(1j * tf.cast(phi_d, def_type))
    r_s = (
        tf.cast(r_s, def_type) + eps
    )  # to prevent NaNs if r == 0, we add an epsilon (default is minuscule)
    phi_s = tf.cast(phi_s, def_type)

    phase = tf.exp(1j * phi_s)
    sinh = tf.sinh(r_s)
    cosh = tf.cosh(r_s)
    tanh = tf.tanh(r_s)

    # create Hermite polynomials
    gamma = alpha * cosh + tf.math.conj(alpha) * phase * sinh
    hermite_arg = gamma / tf.sqrt(phase * tf.sinh(2 * r_s))

    prefactor = tf.expand_dims(
        tf.exp(-0.5 * alpha * tf.math.conj(alpha) - 0.5 * tf.math.conj(alpha) ** 2 * phase * tanh),
        -1,
    )
    coeff = tf.stack(
        [
            _numer_safe_power(0.5 * phase * tanh, n / 2.0) / tf.sqrt(factorial(n) * cosh)
            for n in range(cutoff)
        ],
        axis=-1,
    )
    hermite_terms = tf.stack([tf.cast(H(n, hermite_arg), def_type) for n in range(cutoff)], axis=-1)
    squeezed_coh = prefactor * coeff * hermite_terms

    if not pure:
        squeezed_coh = mixed(squeezed_coh, batched)
    return squeezed_coh
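Here tf.cosh plays the same normalizing role as in the previous example: each coefficient carries a 1/sqrt(n! cosh(r_s)) factor, and cosh, sinh, and tanh of the squeezing magnitude combine with the displacement alpha to form the Hermite-polynomial argument gamma = alpha * cosh(r_s) + conj(alpha) * exp(i * phi_s) * sinh(r_s).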