Python autograd.numpy.exp() Examples

The following are 30 code examples of autograd.numpy.exp(), collected from open-source projects. Each example is preceded by its original source file, project, and license. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
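Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the main reason to use autograd.numpy.exp rather than plain numpy.exp: functions written against autograd's numpy wrapper can be differentiated automatically with autograd.grad.

import autograd.numpy as np
from autograd import grad

def logistic(x):
    # 1 / (1 + e^(-x)), built from autograd.numpy ops so it is differentiable
    return 1.0 / (1.0 + np.exp(-x))

dlogistic = grad(logistic)
print(logistic(0.0))   # 0.5
print(dlogistic(0.0))  # 0.25, i.e. logistic(0) * (1 - logistic(0))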
Example #1
Source File: demo_utils.py    From momi2 with GNU General Public License v3.0
def simple_admixture_3pop(x=None):
    if x is None:
        x = np.random.normal(size=7)
    t = np.cumsum(np.exp(x[:5]))     # event times: positive and strictly increasing
    p = 1.0 / (1.0 + np.exp(x[5:]))  # admixture probabilities, mapped into (0, 1)

    model = momi.DemographicModel(1., .25)
    model.add_leaf("b")
    model.add_leaf("a")
    model.add_leaf("c")
    model.move_lineages("a", "c", t[1], p=1.-p[1])
    model.move_lineages("a", "d", t[0], p=1.-p[0])
    model.move_lineages("c", "d", t[2])
    model.move_lineages("d", "b", t[3])
    model.move_lineages("a", "b", t[4])
    return model 
Example #2
Source File: black_box_svi.py    From autograd with MIT License
def black_box_variational_inference(logprob, D, num_samples):
    """Implements http://arxiv.org/abs/1401.0118, and uses the
    local reparameterization trick from http://arxiv.org/abs/1506.02557"""

    def unpack_params(params):
        # Variational dist is a diagonal Gaussian.
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)

    rs = npr.RandomState(0)
    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
        return -lower_bound

    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params 
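As a usage sketch (not part of black_box_svi.py), the three returned objects plug directly into a gradient-based optimizer. The toy logprob, dimension D, and optimizer settings below are illustrative assumptions; recent autograd releases ship the adam helper in autograd.misc.optimizers.

import autograd.numpy as np
from autograd.misc.optimizers import adam

def logprob(samples, t):
    # Toy target: an unnormalized standard Gaussian log-density (illustrative only).
    return -0.5 * np.sum(samples ** 2, axis=1)

D = 2
objective, gradient, unpack_params = black_box_variational_inference(logprob, D, num_samples=100)

init_params = np.concatenate([np.zeros(D), -5.0 * np.ones(D)])  # [mean, log_std]
optimized_params = adam(gradient, init_params, step_size=0.1, num_iters=200)
mean, log_std = unpack_params(optimized_params)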
Example #3
Source File: ctp.py    From pymoo with Apache License 2.0
def __init__(self, n_var=2, n_constr=2, **kwargs):
        super().__init__(n_var, n_constr, **kwargs)

        a, b = anp.zeros(n_constr + 1), anp.zeros(n_constr + 1)
        a[0], b[0] = 1, 1
        delta = 1 / (n_constr + 1)
        alpha = delta

        for j in range(n_constr):
            beta = a[j] * anp.exp(-b[j] * alpha)
            a[j + 1] = (a[j] + beta) / 2
            b[j + 1] = - 1 / alpha * anp.log(beta / a[j + 1])

            alpha += delta

        self.a = a[1:]
        self.b = b[1:] 
Example #4
Source File: model.py    From tree-regularization-public with MIT License
def softplus(x):
    """ Numerically stable transform from real line to positive reals
    Returns np.log(1.0 + np.exp(x))
    Autograd friendly and fully vectorized
    
    @param x: array of values in (-\infty, +\infty)
    @return ans : array of values in (0, +\infty), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = np.logical_not(mask1)
        out = np.zeros_like(x)
        out[mask0] = np.log1p(np.exp(x[mask0]))
        out[mask1] = x[mask1] + np.log1p(np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + np.log1p(np.exp(-x))
    else:
        return np.log1p(np.exp(x)) 
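A quick check (not from model.py) of why the branching matters: the naive formula overflows for large inputs, while the rearranged form stays finite.

import autograd.numpy as np

x = 1000.0
naive = np.log(1.0 + np.exp(x))   # np.exp(1000.) overflows to inf, so this is inf
stable = softplus(x)              # 1000.0, since x + log1p(exp(-x)) ~ x for large x
print(naive, stable)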
Example #5
Source File: mixture_variational_inference.py    From autograd with MIT License
def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))

        plt.cla()
        target_distribution = lambda x: np.exp(log_density(x, t))
        var_distribution    = lambda x: np.exp(variational_log_density(params, x))
        plot_isocontours(ax, target_distribution)
        plot_isocontours(ax, var_distribution, cmap=plt.cm.bone)
        ax.set_autoscale_on(False)

        rs = npr.RandomState(0)
        samples = variational_sampler(params, num_plotting_samples, rs)
        plt.plot(samples[:, 0], samples[:, 1], 'x')

        plt.draw()
        plt.pause(1.0/30.0) 
Example #6
Source File: poincare_model.py    From hyperbolic_cones with Apache License 2.0
def _nll_loss_fn(poincare_dists):
        """
        Parameters
        ----------
        poincare_dists : numpy.array
            All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).

        Returns
        -------
        float
            Negative log-likelihood loss from the NIPS paper, Eq. (6).
        """
        exp_negative_distances = grad_np.exp(-poincare_dists)

        # Remove the value for the true edge (u,v) from the partition function
        # return -grad_np.log(exp_negative_distances[0] / (- exp_negative_distances[0] + exp_negative_distances.sum()))
        return poincare_dists[0] + grad_np.log(exp_negative_distances[1:].sum()) 
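A brief note on why the return value matches Eq. (6): the negative log-likelihood of the true edge is -log( exp(-d(u,v)) / Z ), which expands to d(u,v) + log Z. Here Z sums exp(-d(u,v')) over the negative samples only, i.e. the true edge is removed from the partition function, which is the same quantity the commented-out variant computes.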
Example #7
Source File: eucl_simple_model.py    From hyperbolic_cones with Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._compute_distances()

        # NLL loss from the NIPS paper.
        exp_negative_distances = np.exp(-self.euclidean_dists)  # (1 + neg_size, batch_size)
        # Remove the value for the true edge (u,v) from the partition function
        Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)
        self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
        self.Z = Z # (batch_size)

        self.pos_loss = self.euclidean_dists[0].sum()
        self.neg_loss = np.log(self.Z).sum()
        self.loss = self.pos_loss + self.neg_loss  # scalar


        self._loss_computed = True 
Example #8
Source File: beta_geo_beta_binom_fitter.py    From lifetimes with MIT License
def _negative_log_likelihood(log_params, frequency, recency, n_periods, weights, penalizer_coef=0):
        params = exp(log_params)
        penalizer_term = penalizer_coef * sum(params ** 2)
        return (
            -(BetaGeoBetaBinomFitter._loglikelihood(params, frequency, recency, n_periods) * weights).sum()
            / weights.sum()
            + penalizer_term
        ) 
Example #9
Source File: convnet.py    From autograd with MIT License
def logsumexp(X, axis, keepdims=False):
    max_X = np.max(X)
    return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=keepdims)) 
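The subtraction of max_X is the standard log-sum-exp stabilization: every entry of X - max_X is at most 0, so np.exp cannot overflow, and adding max_X back afterwards recovers log(sum(exp(X))) exactly.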
Example #10
Source File: beta_geo_beta_binom_fitter.py    From lifetimes with MIT License
def conditional_probability_alive(self, m_periods_in_future, frequency, recency, n_periods):
        """
        Conditional probability alive.

        Conditional probability customer is alive at transaction opportunity
        n_periods + m_periods_in_future.

        .. math:: P(alive at n_periods + m_periods_in_future|alpha, beta, gamma, delta, frequency, recency, n_periods)

        See (A10) in Fader and Hardie 2010.

        Parameters
        ----------
        m_periods_in_future: array_like
            transaction opportunities

        Returns
        -------
        array_like
            alive probabilities

        """
        params = self._unload_params("alpha", "beta", "gamma", "delta")
        alpha, beta, gamma, delta = params

        p1 = betaln(alpha + frequency, beta + n_periods - frequency) - betaln(alpha, beta)
        p2 = betaln(gamma, delta + n_periods + m_periods_in_future) - betaln(gamma, delta)
        p3 = self._loglikelihood(params, frequency, recency, n_periods)

        return exp(p1 + p2) / exp(p3) 
Example #11
Source File: define_gradient.py    From autograd with MIT License
def logsumexp(x):
    """Numerically stable log(sum(exp(x))), also defined in scipy.special"""
    max_x = np.max(x)
    return max_x + np.log(np.sum(np.exp(x - max_x)))

# Next, we write a function that specifies the gradient with a closure.
# The reason for the closure is so that the gradient can depend
# on both the input to the original function (x), and the output of the
# original function (ans). 
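The excerpt ends just before that gradient definition. As a sketch of what it looks like with autograd's extension API (the actual define_gradient.py may differ in details), the closure returned by the VJP maker captures both the input x and the output ans:

import autograd.numpy as np
from autograd.extend import primitive, defvjp
from autograd import grad

logsumexp = primitive(logsumexp)  # tell autograd not to trace inside; we supply the gradient

def logsumexp_vjp(ans, x):
    # Vector-Jacobian product: the gradient of logsumexp is softmax(x),
    # conveniently expressible via the saved output as exp(x - ans).
    return lambda g: g * np.exp(x - ans)

defvjp(logsumexp, logsumexp_vjp)
print(grad(logsumexp)(np.array([1.0, 2.0, 3.0])))  # softmax of [1, 2, 3]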
Example #12
Source File: dot_graph.py    From autograd with MIT License
def fun(x):
        y = np.sin(x)
        return (y + np.exp(x) - 0.5) * y 
Example #13
Source File: mixture_variational_inference.py    From autograd with MIT License
def diag_gaussian_log_density(x, mu, log_std):
    return np.sum(norm.logpdf(x, mu, np.exp(log_std)), axis=-1) 
Example #14
Source File: mixture_variational_inference.py    From autograd with MIT License
def log_density(x, t):
        mu, log_sigma = x[:, 0], x[:, 1]
        sigma_density = norm.logpdf(log_sigma, 0, 1.35)
        mu_density = norm.logpdf(mu, -0.5, np.exp(log_sigma))
        sigma_density2 = norm.logpdf(log_sigma, 0.1, 1.35)
        mu_density2 = norm.logpdf(mu, 0.5, np.exp(log_sigma))
        return np.logaddexp(sigma_density + mu_density,
                            sigma_density2 + mu_density2) 
Example #15
Source File: data.py    From autograd with MIT License
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations) 
Example #16
Source File: black_box_svi.py    From autograd with MIT License
def log_density(x, t):
        mu, log_sigma = x[:, 0], x[:, 1]
        sigma_density = norm.logpdf(log_sigma, 0, 1.35)
        mu_density = norm.logpdf(mu, 0, np.exp(log_sigma))
        return sigma_density + mu_density

    # Build variational objective. 
Example #17
Source File: black_box_svi.py    From autograd with MIT License
def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))

        plt.cla()
        target_distribution = lambda x : np.exp(log_density(x, t))
        plot_isocontours(ax, target_distribution)

        mean, log_std = unpack_params(params)
        variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2*log_std)))
        plot_isocontours(ax, variational_contour)
        plt.draw()
        plt.pause(1.0/30.0) 
Example #18
Source File: natural_gradient_black_box_svi.py    From autograd with MIT License
def log_density(x, t):
        mu, log_sigma = x[:, :obs_dim], x[:, obs_dim:]
        sigma_density = np.sum(norm.logpdf(log_sigma, 0, 1.35), axis=1)
        mu_density    = np.sum(norm.logpdf(Y, mu, np.exp(log_sigma)), axis=1)
        return sigma_density + mu_density

    # Build variational objective. 
Example #19
Source File: modified_beta_geo_fitter.py    From lifetimes with MIT License
def _negative_log_likelihood(log_params, freq, rec, T, weights, penalizer_coef):
        warnings.simplefilter(action="ignore", category=FutureWarning)

        params = np.exp(log_params)
        r, alpha, a, b = params

        A_1 = gammaln(r + freq) - gammaln(r) + r * log(alpha)
        A_2 = gammaln(a + b) + gammaln(b + freq + 1) - gammaln(b) - gammaln(a + b + freq + 1)
        A_3 = -(r + freq) * log(alpha + T)
        A_4 = log(a) - log(b + freq) + (r + freq) * (log(alpha + T) - log(alpha + rec))

        penalizer_term = penalizer_coef * sum(params ** 2)
        return -(weights * (A_1 + A_2 + A_3 + logaddexp(A_4, 0))).sum() / weights.sum() + penalizer_term 
Example #20
Source File: gamma_gamma_fitter.py    From lifetimes with MIT License
def _negative_log_likelihood(
        log_params, 
        frequency, 
        avg_monetary_value, 
        weights, 
        penalizer_coef
    ):
        """
        Computes the Negative Log-Likelihood for the Gamma-Gamma Model as in:
        http://www.brucehardie.com/notes/025/

        This also applies a penalizer to the log-likelihood.

        Equivalent to equation (1a).

        Hardie's implementation of this method can be seen on page 8.
        """

        warnings.simplefilter(action="ignore", category=FutureWarning)

        params = np.exp(log_params)
        p, q, v = params

        x = frequency
        m = avg_monetary_value

        negative_log_likelihood_values = (
            gammaln(p * x + q)
            - gammaln(p * x)
            - gammaln(q)
            + q * np.log(v)
            + (p * x - 1) * np.log(m)
            + (p * x) * np.log(x)
            - (p * x + q) * np.log(x * m + v)
        ) * weights
        penalizer_term = penalizer_coef * sum(params ** 2)

        return -negative_log_likelihood_values.sum() / weights.sum() + penalizer_term 
Example #21
Source File: beta_geo_fitter.py    From lifetimes with MIT License
def _negative_log_likelihood(
        log_params, 
        freq, 
        rec, 
        T, 
        weights, 
        penalizer_coef
    ):
        """
        The following method for calculating the *log-likelihood* uses the method
        specified in section 7 of [2]_. More information can also be found in [3]_.

        References
        ----------
        .. [2] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
        "Counting Your Customers the Easy Way: An Alternative to the
        Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
        .. [3] http://brucehardie.com/notes/004/
        """

        warnings.simplefilter(action="ignore", category=FutureWarning)

        params = np.exp(log_params)
        r, alpha, a, b = params

        A_1 = gammaln(r + freq) - gammaln(r) + r * np.log(alpha)
        A_2 = gammaln(a + b) + gammaln(b + freq) - gammaln(b) - gammaln(a + b + freq)
        A_3 = -(r + freq) * np.log(alpha + T)
        A_4 = np.log(a) - np.log(b + np.maximum(freq, 1) - 1) - (r + freq) * np.log(rec + alpha)

        penalizer_term = penalizer_coef * sum(params ** 2)
        ll = weights * (A_1 + A_2 + np.log(np.exp(A_3) + np.exp(A_4) * (freq > 0)))

        return -ll.sum() / weights.sum() + penalizer_term 
Example #22
Source File: poincare_model.py    From hyperbolic_cones with Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._compute_distances()

        if self.loss_type == 'nll':
            # NLL loss from the NIPS paper.
            exp_negative_distances = np.exp(-self.poincare_dists)  # (1 + neg_size, batch_size)
            # Remove the value for the true edge (u,v) from the partition function
            Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)
            self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
            self.Z = Z # (batch_size)

            self.pos_loss = self.poincare_dists[0].sum()
            self.neg_loss = np.log(self.Z).sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'neg':
            # NEG loss function:
            # - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
            positive_term = np.log(1.0 + np.exp((- self.neg_r + self.poincare_dists[0]) / self.neg_t))  # (batch_size)
            negative_terms = self.neg_mu * \
                             np.log(1.0 + np.exp((self.neg_r - self.poincare_dists[1:]) / self.neg_t)) # (1 + neg_size, batch_size)

            self.pos_loss = positive_term.sum()
            self.neg_loss = negative_terms.sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'maxmargin':
            # max - margin loss function: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
            self.loss = np.maximum(0, self.maxmargin_margin + self.poincare_dists[0] - self.poincare_dists[1:]).sum() # scalar
            self.pos_loss = self.loss
            self.neg_loss = self.loss

        else:
            raise ValueError('Unknown loss type : ' + self.loss_type)

        self._loss_computed = True 
Example #23
Source File: poincare_model.py    From hyperbolic_cones with Apache License 2.0
def _neg_loss_fn(poincare_dists, neg_r, neg_t, neg_mu):
        # NEG loss function:
        # loss = - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
        positive_term = grad_np.log(1.0 + grad_np.exp((- neg_r + poincare_dists[0]) / neg_t))
        negative_terms = grad_np.log(1.0 + grad_np.exp((neg_r - poincare_dists[1:]) / neg_t))
        return positive_term + neg_mu * negative_terms.sum() 
Example #24
Source File: eucl_simple_model.py    From hyperbolic_cones with Apache License 2.0
def _nll_loss_fn(euclidean_dists):
        """
        Parameters
        ----------
        euclidean_dists : numpy.array
            All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).

        Returns
        -------
        float
            Negative log-likelihood loss from the NIPS paper, Eq. (6).
        """
        exp_negative_distances = grad_np.exp(-euclidean_dists)

        # Remove the value for the true edge (u,v) from the partition function
        return euclidean_dists[0] + grad_np.log(exp_negative_distances[1:].sum()) 
Example #25
Source File: lgss_example.py    From variational-smc with MIT License
def sim_prop(self, t, Xp, y, prop_params, model_params, rs = npr.RandomState(0)):
        mu0, Sigma0, A, Q, C, R = model_params
        mut, lint, log_s2t = prop_params[t]
        s2t = np.exp(log_s2t)
        
        if t > 0:
            mu = mut + np.dot(A, Xp.T).T*lint
        else:
            mu = mut + lint*mu0
        return mu + rs.randn(*Xp.shape)*np.sqrt(s2t) 
Example #26
Source File: lgss_example.py    From variational-smc with MIT License
def log_prop(self, t, Xc, Xp, y, prop_params, model_params):
        mu0, Sigma0, A, Q, C, R = model_params
        mut, lint, log_s2t = prop_params[t]
        s2t = np.exp(log_s2t)
        
        if t > 0:
            mu = mut + np.dot(A, Xp.T).T*lint
        else:
            mu = mut + lint*mu0
        
        return self.log_normal(Xc, mu, np.diag(s2t)) 
Example #27
Source File: test_einsum2.py    From momi2 with GNU General Public License v3.0
def test_grad():
    p = .05
    def fun0(B, Bdims):
        return einsum2.einsum2(np.exp(B**2), Bdims, np.transpose(B), Bdims[::-1], [])
    def fun1(B, Bdims):
        if Bdims: Bdims = list(range(len(Bdims)))
        return np.einsum(np.exp(B**2), Bdims,
                         np.transpose(B), Bdims[::-1], [])
    grad0 = autograd.grad(fun0)
    grad1 = autograd.grad(fun1)
    B, Bdims = random_tensor(p)
    assert np.allclose(grad0(B, Bdims), grad1(B, Bdims)) 
Example #28
Source File: test_subsample.py    From momi2 with GNU General Public License v3.0
def check_subsampling(demo, sampled_n_dict, add_n, folded=False):
    config_list = momi.data.configurations.build_full_config_list(*zip(
        *sampled_n_dict.items()))
    if folded:
        config_list = momi.site_freq_spectrum(
            config_list.sampled_pops, [config_list]).fold().configs

    sfs1 = demo.expected_sfs(config_list, normalized=True, folded=folded)
    #sfs1 = expected_sfs(demo._get_demo(sampled_n_dict), config_list,
    #                    normalized=True, folded=folded, **kwargs)

    configs2 = config_list._copy(sampled_n=config_list.sampled_n + add_n)
    sfs2 = demo.expected_sfs(configs2, normalized=True, folded=folded)
    #sfs2 = expected_sfs(demo._get_demo(dict(zip(configs2.sampled_pops, configs2.sampled_n))),
    #                    configs2,
    #                    normalized=True, folded=folded, **kwargs)

    sfs1 = np.array(list(sfs1.values()))
    sfs2 = np.array(list(sfs2.values()))
    assert np.allclose(sfs1, sfs2)

    ## check sums to 1 even with error matrix
    #error_matrices = [np.exp(np.random.randn(n + 1, n + 1))
    #                  for n in configs2.sampled_n]
    #error_matrices = [
    #    np.einsum('ij,j->ij', x, 1. / np.sum(x, axis=0)) for x in error_matrices]

    #sfs3 = demo.expected_sfs(configs2, normalized=True, folded=folded,
    #                         error_matrices=error_matrices)
    #assert np.isclose(sum(sfs3.values()), 1.0) 
Example #29
Source File: ackley.py    From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
        part1 = -1. * self.a * anp.exp(-1. * self.b * anp.sqrt((1. / self.n_var) * anp.sum(x * x, axis=1)))
        part2 = -1. * anp.exp((1. / self.n_var) * anp.sum(anp.cos(self.c * x), axis=1))
        out["F"] = part1 + part2 + self.a + anp.exp(1) 
Example #30
Source File: demo_utils.py    From momi2 with GNU General Public License v3.0
def simple_two_pop_demo(x=np.random.normal(size=4)):
    # Note: the default x is drawn once, when the function is first defined.
    x = [np.exp(xi) for xi in x]
    model = momi.DemographicModel(1., .25)
    model.add_leaf(1)
    model.add_leaf(0)
    model.set_size(1, t=0.0, N=x[1])
    model.set_size(0, t=0.0, N=x[2])
    model.move_lineages(0, 1, t=x[0])
    model.set_size(1, t=x[0], N=x[3])
    return model