Python autograd.numpy.tanh() Examples

The following are 30 code examples of autograd.numpy.tanh(), drawn from open-source projects. The line above each example names the source file it comes from, the project, and that project's license. You may also want to check out the other available functions and classes of the autograd.numpy module.
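
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how autograd.numpy.tanh() composes with autograd's grad and elementwise_grad; the input values are illustrative.

import autograd.numpy as np
from autograd import grad, elementwise_grad

# Differentiate tanh at a scalar point: d/dx tanh(x) = 1 - tanh(x)**2.
dtanh = grad(np.tanh)
print(dtanh(0.5))                          # ~0.7864, equals 1 - np.tanh(0.5)**2

# elementwise_grad applies the same derivative across an array input.
dtanh_vec = elementwise_grad(np.tanh)
x = np.linspace(-2.0, 2.0, 5)
print(np.allclose(dtanh_vec(x), 1 - np.tanh(x)**2))   # True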
Example #1
Source File: rnn.py    From MLAlgorithms with MIT License
def forward_pass(self, X):
        self.last_input = X
        n_samples, n_timesteps, input_shape = X.shape
        # One extra time slot stores the previous hidden state, so that
        # states[:, i - 1, :] at i == 0 wraps around to it in the loop below.
        states = np.zeros((n_samples, n_timesteps + 1, self.hidden_dim))
        states[:, -1, :] = self.hprev.copy()
        p = self._params

        for i in range(n_timesteps):
            states[:, i, :] = np.tanh(np.dot(X[:, i, :], p["W"]) + np.dot(states[:, i - 1, :], p["U"]) + p["b"])

        self.states = states
        self.hprev = states[:, n_timesteps - 1, :].copy()
        if self.return_sequences:
            return states[:, 0:-1, :]
        else:
            return states[:, -2, :] 
Example #2
Source File: rnn.py    From autograd with MIT License
def rnn_predict(params, inputs):
    def update_rnn(input, hiddens):
        return np.tanh(concat_and_multiply(params['change'], input, hiddens))

    def hiddens_to_output_probs(hiddens):
        output = concat_and_multiply(params['predict'], hiddens)
        return output - logsumexp(output, axis=1, keepdims=True)     # Normalize log-probs.

    num_sequences = inputs.shape[1]
    hiddens = np.repeat(params['init hiddens'], num_sequences, axis=0)
    output = [hiddens_to_output_probs(hiddens)]

    for input in inputs:  # Iterate over time steps.
        hiddens = update_rnn(input, hiddens)
        output.append(hiddens_to_output_probs(hiddens))
    return output 
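
Both this example and the LSTM example below rely on a concat_and_multiply helper that is defined elsewhere in autograd's example code and is not shown here. A minimal sketch of such a helper (stack the arguments along the feature axis, append a bias column of ones, and multiply by a single weight matrix) might look like this:

import autograd.numpy as np

def concat_and_multiply(weights, *args):
    # Concatenate the inputs, add a constant column for the bias term,
    # then apply one combined weight matrix.
    cat_state = np.hstack(args + (np.ones((args[0].shape[0], 1)),))
    return np.dot(cat_state, weights)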
Example #3
Source File: lstm.py    From autograd with MIT License
def lstm_predict(params, inputs):
    def update_lstm(input, hiddens, cells):
        change  = np.tanh(concat_and_multiply(params['change'], input, hiddens))
        forget  = sigmoid(concat_and_multiply(params['forget'], input, hiddens))
        ingate  = sigmoid(concat_and_multiply(params['ingate'], input, hiddens))
        outgate = sigmoid(concat_and_multiply(params['outgate'], input, hiddens))
        cells   = cells * forget + ingate * change
        hiddens = outgate * np.tanh(cells)
        return hiddens, cells

    def hiddens_to_output_probs(hiddens):
        output = concat_and_multiply(params['predict'], hiddens)
        return output - logsumexp(output, axis=1, keepdims=True) # Normalize log-probs.

    num_sequences = inputs.shape[1]
    hiddens = np.repeat(params['init hiddens'], num_sequences, axis=0)
    cells   = np.repeat(params['init cells'],   num_sequences, axis=0)

    output = [hiddens_to_output_probs(hiddens)]
    for input in inputs:  # Iterate over time steps.
        hiddens, cells = update_lstm(input, hiddens, cells)
        output.append(hiddens_to_output_probs(hiddens))
    return output 
Example #4
Source File: test_wrappers.py    From autograd with MIT License
def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)

    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))

    assert len(grad_and_aux(f)(x)) == 2

    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)

## No longer support this behavior
# def test_make_ggnvp_broadcasting():
#   A = npr.randn(4, 5)
#   x = npr.randn(10, 4)
#   v = npr.randn(10, 4)

#   fun = lambda x: np.tanh(np.dot(x, A))
#   res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
#   res2 = make_ggnvp(fun)(x)(v)
#   check_equivalent(res1, res2) 
Example #5
Source File: bench_rnn.py    From autograd with MIT License
def setup(self):
        self.batch_size = 16
        self.dtype = "float32"
        self.D = 2**10
        self.x = 0.01 * np.random.randn(self.batch_size,self.D).astype(self.dtype)
        self.W1 = 0.01 * np.random.randn(self.D,self.D).astype(self.dtype)
        self.b1 = 0.01 * np.random.randn(self.D).astype(self.dtype)
        self.Wout = 0.01 * np.random.randn(self.D,1).astype(self.dtype)
        self.bout = 0.01 * np.random.randn(1).astype(self.dtype)
        self.l = (np.random.rand(self.batch_size,1) > 0.5).astype(self.dtype)
        self.n = 50

        def autograd_rnn(params, x, label, n):
            W, b, Wout, bout = params
            h1 = x
            for i in range(n):
                h1 = np.tanh(np.dot(h1, W) + b)
            logit = np.dot(h1, Wout) + bout
            # Binary cross-entropy from logits; note that
            # logit + np.log(1 + np.exp(-logit)) equals np.log(1 + np.exp(logit)).
            loss = -np.sum(label * logit - (
                    logit + np.log(1 + np.exp(-logit))))
            return loss

        self.fn = autograd_rnn
        self.grad_fn = grad(self.fn) 
Example #6
Source File: Utilities.py    From DeepLearningTutorial with MIT License
def activation(x):
    return np.tanh(x) 
Example #7
Source File: optimize_mode_converter.py    From ceviche with MIT License
def operator_proj(rho, eta=0.5, beta=100):
    """Density projection
    """
    return npa.divide(npa.tanh(beta * eta) + npa.tanh(beta * (rho - eta)), 
                        npa.tanh(beta * eta) + npa.tanh(beta * (1 - eta))) 
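
As a quick illustration (the values below are hypothetical, and assume autograd.numpy is imported as npa as in the project), intermediate densities are pushed toward the nearer of 0 and 1:

import autograd.numpy as npa

rho = npa.array([0.0, 0.2, 0.5, 0.8, 1.0])
print(operator_proj(rho, eta=0.5, beta=100))
# approximately [0.  0.  0.5 1.  1. ]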
Example #8
Source File: optimize_1_3.py    From ceviche with MIT License
def operator_proj(rho, eta=0.5, beta=100):
    """Density projection
    """
    return npa.divide(npa.tanh(beta * eta) + npa.tanh(beta * (rho - eta)), npa.tanh(beta * eta) + npa.tanh(beta * (1 - eta))) 
Example #9
Source File: utils.py    From angler with MIT License
def rho_bar(rho, eta=0.5, beta=100):
    num = npa.tanh(beta*eta) + npa.tanh(beta*(rho - eta))
    den = npa.tanh(beta*eta) + npa.tanh(beta*(1 - eta))
    return num / den 
Example #10
Source File: Utilities.py    From DeepLearningTutorial with MIT License
def activation(x):
    return np.tanh(x) 
Example #11
Source File: Utilities.py    From DeepLearningTutorial with MIT License
def activation(x):
    return np.tanh(x) 
Example #12
Source File: Utilities.py    From DeepLearningTutorial with MIT License
def sigmoid(x):
    return 0.5*(np.tanh(x) + 1.0) 
Example #13
Source File: linear_models.py    From MLAlgorithms with MIT License
def sigmoid(x):
        # Logistic sigmoid via the identity 1 / (1 + exp(-x)) == 0.5 * (tanh(x / 2) + 1).
        return 0.5 * (np.tanh(0.5 * x) + 1)
Example #14
Source File: Utilities.py    From DeepLearningTutorial with MIT License
def activation(x):
    return np.tanh(x) 
Example #15
Source File: activations.py    From MLAlgorithms with MIT License
def tanh(z):
    return np.tanh(z) 
Example #16
Source File: rnn.py    From MLAlgorithms with MIT License
def __init__(self, hidden_dim, activation="tanh", inner_init="orthogonal", parameters=None, return_sequences=True):
        self.return_sequences = return_sequences
        self.hidden_dim = hidden_dim
        self.inner_init = get_initializer(inner_init)
        self.activation = get_activation(activation)
        self.activation_d = elementwise_grad(self.activation)
        if parameters is None:
            self._params = Parameters()
        else:
            self._params = parameters
        self.last_input = None
        self.states = None
        self.hprev = None
        self.input_dim = None 
Example #17
Source File: model.py    From tree-regularization-public with MIT License
def build_mlp(layer_sizes, activation=np.tanh, output_activation=lambda x: x):
    """Constructor for multilayer perceptron.

    @param layer_sizes: list of integers
                        list of layer sizes in the perceptron.
    @param activation: function (default: np.tanh)
                       what activation to use after first N - 1 layers.
    @param output_activation: function (default: linear)
                              what activation to use after last layer.
    @return predict: function
                     used to predict y_hat
    @return log_likelihood: function
                            used to compute log likelihood
    @return parser: WeightsParser object
                    object to organize weights
    """
    parser = WeightsParser()
    for i, shape in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        parser.add_shape(('weights', i), shape)
        parser.add_shape(('biases', i), (1, shape[1]))

    def predict(weights, X):
        cur_X = copy(X.T)
        for layer in range(len(layer_sizes) - 1):
            cur_W = parser.get(weights, ('weights', layer))
            cur_B = parser.get(weights, ('biases', layer))
            cur_Z = np.dot(cur_X, cur_W) + cur_B
            cur_X = activation(cur_Z)
        return output_activation(cur_Z.T)

    def log_likelihood(weights, X, y):
        y_hat = predict(weights, X)
        return mse(y.T, y_hat.T)

    return predict, log_likelihood, parser 
Example #18
Source File: test_matrices.py    From mici with MIT License
def __init__(self):
        matrix_pairs, grad_log_abs_dets, grad_quadratic_form_invs = {}, {}, {}
        rng = np.random.RandomState(SEED)
        for sz in SIZES:
            for softabs_coeff in [0.5, 1., 1.5]:
                sym_array = rng.standard_normal((sz, sz))
                sym_array = sym_array + sym_array.T
                unreg_eigval, eigvec = np.linalg.eigh(sym_array)
                eigval = unreg_eigval / np.tanh(unreg_eigval * softabs_coeff)
                matrix_pairs[(sz, softabs_coeff)] = (
                    matrices.SoftAbsRegularizedPositiveDefiniteMatrix(
                        sym_array, softabs_coeff
                    ), (eigvec * eigval) @ eigvec.T)

        if AUTOGRAD_AVAILABLE:

            def get_param(matrix):
                eigvec = matrix.eigvec.array
                return (eigvec * matrix.unreg_eigval) @ eigvec.T

            def param_func(param, matrix):
                softabs_coeff = matrix._softabs_coeff
                sym_array = (param + param.T) / 2
                unreg_eigval, eigvec = anp.linalg.eigh(sym_array)
                eigval = unreg_eigval / anp.tanh(unreg_eigval * softabs_coeff)
                return (eigvec * eigval) @ eigvec.T

        else:
            param_func, get_param = None, None

        super().__init__(matrix_pairs, get_param, param_func, rng) 
Example #19
Source File: test_systematic.py    From autograd with MIT License
def test_tanh():    unary_ufunc_check(np.tanh) 
Example #20
Source File: test_wrappers.py    From autograd with MIT License
def test_make_ggnvp_nondefault_g():
    A = npr.randn(5, 4)
    x = npr.randn(4)
    v = npr.randn(4)

    g = lambda y: np.sum(2.*y**2 + y**4)

    fun = lambda x: np.dot(A, x)
    check_equivalent(make_ggnvp(fun, g)(x)(v), _make_explicit_ggnvp(fun, g)(x)(v))

    fun2 = lambda x: np.tanh(np.dot(A, x))
    check_equivalent(make_ggnvp(fun2, g)(x)(v), _make_explicit_ggnvp(fun2, g)(x)(v)) 
Example #21
Source File: test_wrappers.py    From autograd with MIT License
def test_make_ggnvp():
    A = npr.randn(5, 4)
    x = npr.randn(4)
    v = npr.randn(4)

    fun = lambda x: np.dot(A, x)
    check_equivalent(make_ggnvp(fun)(x)(v), _make_explicit_ggnvp(fun)(x)(v))

    fun2 = lambda x: np.tanh(np.dot(A, x))
    check_equivalent(make_ggnvp(fun2)(x)(v), _make_explicit_ggnvp(fun2)(x)(v)) 
Example #22
Source File: test_wrappers.py    From autograd with MIT License
def test_make_jvp():
    A = npr.randn(3, 5)
    x = npr.randn(5)
    v = npr.randn(5)
    fun = lambda x: np.tanh(np.dot(A, x))

    jvp_explicit = lambda x: lambda v: np.dot(jacobian(fun)(x), v)
    jvp = make_jvp(fun)

    check_equivalent(jvp_explicit(x)(v), jvp(x)(v)[1]) 
Example #23
Source File: bayesian_neural_net.py    From autograd with MIT License
def make_nn_funs(layer_sizes, L2_reg, noise_variance, nonlinearity=np.tanh):
    """These functions implement a standard multi-layer perceptron,
    vectorized over both training examples and weight samples."""
    shapes = list(zip(layer_sizes[:-1], layer_sizes[1:]))
    num_weights = sum((m+1)*n for m, n in shapes)

    def unpack_layers(weights):
        num_weight_sets = len(weights)
        for m, n in shapes:
            yield weights[:, :m*n]     .reshape((num_weight_sets, m, n)),\
                  weights[:, m*n:m*n+n].reshape((num_weight_sets, 1, n))
            weights = weights[:, (m+1)*n:]

    def predictions(weights, inputs):
        """weights is shape (num_weight_samples x num_weights)
           inputs  is shape (num_datapoints x D)"""
        inputs = np.expand_dims(inputs, 0)
        for W, b in unpack_layers(weights):
            outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
            inputs = nonlinearity(outputs)
        return outputs

    def logprob(weights, inputs, targets):
        log_prior = -L2_reg * np.sum(weights**2, axis=1)
        preds = predictions(weights, inputs)
        log_lik = -np.sum((preds - targets)**2, axis=1)[:, 0] / noise_variance
        return log_prior + log_lik

    return num_weights, predictions, logprob 
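
A minimal, hypothetical way to exercise the returned functions (all shapes and values below are illustrative, not from the original example):

import autograd.numpy as np
import autograd.numpy.random as npr

num_weights, predictions, logprob = make_nn_funs(
    layer_sizes=[1, 20, 20, 1], L2_reg=0.1, noise_variance=0.01)

inputs  = np.linspace(-1, 1, 40).reshape(-1, 1)   # (num_datapoints, D)
targets = np.sin(4 * inputs)                      # toy regression targets
weights = 0.1 * npr.randn(5, num_weights)         # 5 weight samples

print(predictions(weights, inputs).shape)         # (5, 40, 1)
print(logprob(weights, inputs, targets).shape)    # (5,)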
Example #24
Source File: neural_net_regression.py    From autograd with MIT License
def nn_predict(params, inputs, nonlinearity=np.tanh):
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = nonlinearity(outputs)
    return outputs 
Example #25
Source File: generative_adversarial_net.py    From autograd with MIT License
def sigmoid(x):    return 0.5 * (np.tanh(x) + 1.0) 
Example #26
Source File: neural_net.py    From autograd with MIT License
def neural_net_predict(params, inputs):
    """Implements a deep neural network for classification.
       params is a list of (weights, bias) tuples.
       inputs is an (N x D) matrix.
       returns normalized class log-probabilities."""
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = np.tanh(outputs)
    return outputs - logsumexp(outputs, axis=1, keepdims=True) 
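
A hypothetical way to build a matching params list (init_params below is illustrative and not part of the original example):

import autograd.numpy.random as npr

def init_params(layer_sizes, scale=0.1, rs=npr.RandomState(0)):
    # One (weights, bias) pair per layer, matching the loop in neural_net_predict.
    return [(scale * rs.randn(m, n), scale * rs.randn(n))
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]

params = init_params([784, 200, 10])   # e.g. an MNIST-sized classifier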
Example #27
Source File: wing.py    From autograd with MIT License
def sigmoid(x):
    return 0.5*(np.tanh(x) + 1.0)   # Output ranges from 0 to 1. 
Example #28
Source File: convnet.py    From autograd with MIT License
def nonlinearity(self, x):
        return np.tanh(x) 
Example #29
Source File: rnn.py    From autograd with MIT License
def sigmoid(x):
    return 0.5*(np.tanh(x) + 1.0)   # Output ranges from 0 to 1. 
Example #30
Source File: model.py    From tree-regularization-public with MIT License
def sigmoid(x):
    return 0.5 * (np.tanh(x) + 1)