Python autograd.numpy.pi Examples

The following are 30 code examples of autograd.numpy.pi (the constant π re-exported by autograd's NumPy wrapper). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the autograd.numpy module, or try the search function.
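As a quick orientation, here is a minimal sketch (not taken from any of the projects below) of the typical pattern: np.pi is used as an ordinary constant inside a function that autograd then differentiates.

import autograd.numpy as np
from autograd import grad

def gaussian_log_pdf(x, mu=0.0, sigma=1.0):
    # log density of N(mu, sigma^2); np.pi behaves exactly like numpy.pi
    return -0.5 * np.log(2 * np.pi * sigma ** 2) - 0.5 * ((x - mu) / sigma) ** 2

dlogpdf_dx = grad(gaussian_log_pdf)   # derivative with respect to x
print(dlogpdf_dx(1.5))                # -1.5 for the standard normal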
Example #1
Source File: tm.py    From autohmm with BSD 2-Clause "Simplified" License
def _ll(self, m, p, xn, **kwargs):
        """Computation of log likelihood

        Dimensions
        ----------
        m : n_unique x n_features
        p : n_unique x n_features x n_features
        xn: N x n_features
        """

        samples = xn.shape[0]
        xn = xn.reshape(samples, 1, self.n_features)
        m = m.reshape(1, self.n_unique, self.n_features)

        det = np.linalg.det(np.linalg.inv(p))
        det = det.reshape(1, self.n_unique)
        tem = np.einsum('NUF,UFX,NUX->NU', (xn - m), p, (xn - m))
        res = (-self.n_features/2.0)*np.log(2*np.pi) - 0.5*tem - 0.5*np.log(det)

        return res  # N x n_unique 
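Here p holds one precision matrix per state, so the einsum evaluates the quadratic form (xn − m)ᵀ p (xn − m) for every sample/state pair, while det is the determinant of the corresponding covariance p⁻¹; res is then the standard multivariate Gaussian log density −(n_features/2)·log(2π) − ½·(xn − m)ᵀ p (xn − m) − ½·log det(p⁻¹), returned as an N x n_unique matrix.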
Example #2
Source File: observation.py    From scarlet with MIT License
def log_norm(self):
        try:
            return self._log_norm
        except AttributeError:
            if self.frame != self.model_frame:
                images_ = self.images[self.slices_for_images]
                weights_ = self.weights[self.slices_for_images]
            else:
                images_ = self.images
                weights_ = self.weights

            # normalization of the single-pixel likelihood:
            # 1 / [(2pi)^1/2 (sigma^2)^1/2]
            # with inverse variance weights: sigma^2 = 1/weight
            # full likelihood is sum over all data samples: pixel in images
            # NOTE: this assumes that all pixels are used in likelihood!
            log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
            cuts = weights_ > 0
            log_sigma[cuts] = np.log(1 / weights_[cuts])
            self._log_norm = (
                    np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                    + np.sum(log_sigma) / 2
            )
        return self._log_norm 
Example #3
Source File: observation.py    From scarlet with MIT License
def get_loss(self, model):
        """Computes the loss/fidelity of a given model with respect to the observation
        Parameters
        ----------
        model: array
            A model from `Blend`
        Returns
        -------
        loss: float
            Loss of the model
        """
        model_ = self.render(model)
        images_ = self.images
        weights_ = self.weights

        # properly normalized likelihood
        log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        log_norm = (
                np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                + np.sum(log_sigma) / 2
        )

        return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2) 
Example #4
Source File: VariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def likelihood(self, hyp):
        Y = self.Y_batch     
            
        # Encode
        mu_1, Sigma_1 = self.neural_net(Y, self.layers_encoder, hyp[self.idx_encoder]) 
        
        # Reparametrization trick
        epsilon = np.random.randn(self.N_batch,self.Z_dim)        
        z = mu_1 + epsilon*np.sqrt(Sigma_1)
        
        # Decode
        mu_2, Sigma_2 = self.neural_net(z, self.layers_decoder, hyp[self.idx_decoder])
        
        # Log-determinants
        log_det_1 = np.sum(np.log(Sigma_1))
        log_det_2 = np.sum(np.log(Sigma_2))
        
        # KL[q(z|y) || p(z)]
        KL = 0.5*(np.sum(Sigma_1) + np.sum(mu_1**2) - self.Z_dim - log_det_1)
        
        # -log p(y)
        NLML = 0.5*(np.sum((Y-mu_2)**2/Sigma_2) + log_det_2 + np.log(2.*np.pi)*self.Y_dim*self.N_batch)
           
        return NLML + KL 
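The KL term above is the closed-form divergence between the diagonal Gaussian posterior q(z|y) = N(mu_1, Sigma_1) and a standard normal prior, KL = ½·(Σ Sigma_1 + Σ mu_1² − Z_dim − Σ log Sigma_1), and NLML is the corresponding Gaussian negative log-likelihood of the decoded reconstruction.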
Example #5
Source File: sources.py    From ceviche with MIT License
def compute_f(theta, lambda0, dL, shape):
    """ Compute the 'vacuum' field vector """

    # get plane wave k vector components (in units of grid cells)
    k0 = 2 * npa.pi / lambda0 * dL
    kx =  k0 * npa.sin(theta)
    ky = -k0 * npa.cos(theta)  # negative because downwards

    # array to write into
    f_src = npa.zeros(shape, dtype=npa.complex128)

    # get coordinates
    Nx, Ny = shape
    xpoints = npa.arange(Nx)
    ypoints = npa.arange(Ny)
    xv, yv = npa.meshgrid(xpoints, ypoints, indexing='ij')

    # compute values and insert into array
    x_PW = npa.exp(1j * xpoints * kx)[:, None]
    y_PW = npa.exp(1j * ypoints * ky)[:, None]

    f_src[xv, yv] = npa.outer(x_PW, y_PW)

    return f_src.flatten() 
Example #6
Source File: ctp.py    From pymoo with Apache License 2.0
def __init__(self, n_var=2, n_constr=1, option="linear"):
        super().__init__(n_var=n_var, n_obj=2, n_constr=n_constr, xl=0, xu=1, type_var=anp.double)

        def g_linear(x):
            return 1 + anp.sum(x, axis=1)

        def g_multimodal(x):
            A = 10
            return 1 + A * x.shape[1] + anp.sum(x ** 2 - A * anp.cos(2 * anp.pi * x), axis=1)

        if option == "linear":
            self.calc_g = g_linear

        elif option == "multimodal":
            self.calc_g = g_multimodal
            self.xl[:, 1:] = -5.12
            self.xu[:, 1:] = 5.12

        else:
            print("Unknown option for CTP single.") 
Example #7
Source File: density.py    From kernel-gof with MIT License
def multivariate_normal_density(mean, cov, X):
        """
        Exact density (not log density) of a multivariate Gaussian.
        mean: length-d array
        cov: a dxd covariance matrix
        X: n x d 2d-array
        """
        
        evals, evecs = np.linalg.eigh(cov)
        cov_half_inv = evecs.dot(np.diag(evals**(-0.5))).dot(evecs.T)
    #     print(evals)
        half_evals = np.dot(X-mean, cov_half_inv)
        full_evals = np.sum(half_evals**2, 1)
        unden = np.exp(-0.5*full_evals)
        
        Z = np.sqrt(np.linalg.det(2.0*np.pi*cov))
        den = unden/Z
        assert len(den) == X.shape[0]
        return den 
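A small sanity check (an added sketch, not part of kernel-gof, assuming the function is reachable at module level and SciPy is installed): the returned densities should agree with scipy.stats.multivariate_normal.

import autograd.numpy as np
from scipy.stats import multivariate_normal

mean = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
X = np.array([[0.0, 0.0], [1.0, 2.0], [-0.5, 1.5]])

den = multivariate_normal_density(mean, cov, X)
ref = multivariate_normal(mean=mean, cov=cov).pdf(X)
assert np.allclose(den, ref)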
Example #8
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_points=100, flatten=True):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pf = []

        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pf.append(anp.array([x1, x2]).T)

        if not flatten:
            pf = anp.concatenate([pf[None,...] for pf in pf])
        else:
            pf = anp.row_stack(pf)

        return pf 
Example #9
Source File: black_box_svi.py    From autograd with MIT License
def black_box_variational_inference(logprob, D, num_samples):
    """Implements http://arxiv.org/abs/1401.0118, and uses the
    local reparameterization trick from http://arxiv.org/abs/1506.02557"""

    def unpack_params(params):
        # Variational dist is a diagonal Gaussian.
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)

    rs = npr.RandomState(0)
    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
        return -lower_bound

    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params 
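A hedged usage sketch (added here, in the style of autograd's own examples rather than copied from them): the returned gradient can be fed to the adam optimizer from autograd.misc.optimizers; log_density below is a hypothetical stand-in for a real log joint density.

import autograd.numpy as np
from autograd.misc.optimizers import adam

def log_density(samples, t):
    # toy target: a standard 2-D Gaussian, one log density value per row of samples
    return -0.5 * np.sum(samples ** 2, axis=1) - np.log(2 * np.pi)

D = 2
objective, gradient, unpack_params = black_box_variational_inference(log_density, D, num_samples=100)
init_params = np.concatenate([np.zeros(D), -5 * np.ones(D)])   # [mean, log_std]
variational_params = adam(gradient, init_params, step_size=0.1, num_iters=200)
mean, log_std = unpack_params(variational_params)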
Example #10
Source File: ctp.py    From pymop with Apache License 2.0
def __init__(self, n_var=2, n_constr=1, option="linear"):
        super().__init__(n_var=n_var, n_obj=2, n_constr=n_constr, xl=0, xu=1, type_var=anp.double)

        def g_linear(x):
            return 1 + anp.sum(x, axis=1)

        def g_multimodal(x):
            A = 10
            return 1 + A * x.shape[1] + anp.sum(x ** 2 - A * anp.cos(2 * anp.pi * x), axis=1)

        if option == "linear":
            self.calc_g = g_linear

        elif option == "multimodal":
            self.calc_g = g_multimodal
            self.xl[:, 1:] = -5.12
            self.xu[:, 1:] = 5.12

        else:
            print("Unknown option for CTP problems.") 
Example #11
Source File: zdt.py    From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
        f1 = 1 - anp.exp(-4 * x[:, 0]) * anp.power(anp.sin(6 * anp.pi * x[:, 0]), 6)
        g = 1 + 9.0 * anp.power(anp.sum(x[:, 1:], axis=1) / (self.n_var - 1.0), 0.25)
        f2 = g * (1 - anp.power(f1 / g, 2))

        out["F"] = anp.column_stack([f1, f2]) 
Example #12
Source File: dtlz.py    From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
        f = []
        for i in range(0, self.n_obj - 1):
            f.append(x[:, i])
        f = anp.column_stack(f)

        g = 1 + 9 / self.k * anp.sum(x[:, -self.k:], axis=1)
        h = self.n_obj - anp.sum(f / (1 + g[:, None]) * (1 + anp.sin(3 * anp.pi * f)), axis=1)

        out["F"] = anp.column_stack([f, (1 + g) * h]) 
Example #13
Source File: define_custom_problem.py    From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
        # define an objective function to be evaluated using var1
        f = anp.sum(anp.power(x, 2) - self.const_1 * anp.cos(2 * anp.pi * x), axis=1)

        # !!! only if a constraint value is positive it is violated !!!
        # set the constraint that x1 + x2 > var2
        g1 = (x[:, 0] + x[:, 1]) - self.const_2

        # set the constraint that x3 + x4 < var2
        g2 = self.const_2 - (x[:, 2] + x[:, 3])

        out["F"] = f
        out["G"] = anp.column_stack([g1, g2]) 
Example #14
Source File: zdt.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pareto_front = anp.array([]).reshape((-1, 2))
        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_pareto_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pareto_front = anp.concatenate((pareto_front, anp.array([x1, x2]).T), axis=0)
        return pareto_front 
Example #15
Source File: zdt.py    From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
        f1 = x[:, 0]
        g = 1.0
        g += 10 * (self.n_var - 1)
        for i in range(1, self.n_var):
            g += x[:, i] * x[:, i] - 10.0 * anp.cos(4.0 * anp.pi * x[:, i])
        h = 1.0 - anp.sqrt(f1 / g)
        f2 = g * h

        out["F"] = anp.column_stack([f1, f2]) 
Example #16
Source File: tnk.py    From pymop with Apache License 2.0
def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=2, type_var=anp.double)
        self.xl = anp.array([0, 1e-30])
        self.xu = anp.array([anp.pi, anp.pi]) 
Example #17
Source File: ConditionalVariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def likelihood(self, hyp):
        X = self.X_batch
        Y = self.Y_batch     
            
        # Encode X
        mu_0, Sigma_0 = self.neural_net(X, self.layers_encoder_0, hyp[self.idx_encoder_0]) 
        
        # Encode Y
        mu_1, Sigma_1 = self.neural_net(Y, self.layers_encoder_1, hyp[self.idx_encoder_1]) 
        
        # Reparametrization trick
        epsilon = np.random.randn(self.N_batch,self.Z_dim)        
        z = mu_1 + epsilon*np.sqrt(Sigma_1)
        
        # Decode
        mu_2, Sigma_2 = self.neural_net(z, self.layers_decoder, hyp[self.idx_decoder])
        
        # Log-determinants
        log_det_0 = np.sum(np.log(Sigma_0))
        log_det_1 = np.sum(np.log(Sigma_1))
        log_det_2 = np.sum(np.log(Sigma_2))
        
        # KL[q(z|y) || p(z|x)]
        KL = 0.5*(np.sum(Sigma_1/Sigma_0) + np.sum((mu_0-mu_1)**2/Sigma_0) - self.Z_dim + log_det_0 - log_det_1)
        
        # -log p(y|z)
        NLML = 0.5*(np.sum((Y-mu_2)**2/Sigma_2) + log_det_2 + np.log(2.*np.pi)*self.Y_dim*self.N_batch)
                   
        return NLML + KL 
Example #18
Source File: dtlz.py    From pymoo with Apache License 2.0
def g1(self, X_M):
        return 100 * (self.k + anp.sum(anp.square(X_M - 0.5) - anp.cos(20 * anp.pi * (X_M - 0.5)), axis=1)) 
Example #19
Source File: ctp.py    From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
        f1, f2 = self.calc_objectives(x)
        out["F"] = anp.column_stack([f1, f2])

        theta = -0.2 * anp.pi
        a, b, c, d, e = 0.1, 10, 1, 0.5, 1

        out["G"] = self.calc_constraint(theta, a, b, c, d, e, f1, f2) 
Example #20
Source File: dtlz.py    From pymop with Apache License 2.0
def obj_func(self, X_, g, alpha=1):
        f = []

        for i in range(0, self.n_obj):
            _f = (1 + g)
            _f *= anp.prod(anp.cos(anp.power(X_[:, :X_.shape[1] - i], alpha) * anp.pi / 2.0), axis=1)
            if i > 0:
                _f *= anp.sin(anp.power(X_[:, X_.shape[1] - i], alpha) * anp.pi / 2.0)

            f.append(_f)

        f = anp.column_stack(f)
        return f 
Example #21
Source File: dtlz.py    From pymop with Apache License 2.0
def g1(self, X_M):
        return 100 * (self.k + anp.sum(anp.square(X_M - 0.5) - anp.cos(20 * anp.pi * (X_M - 0.5)), axis=1)) 
Example #22
Source File: ackley.py    From pymop with Apache License 2.0
def __init__(self, n_var=10, c1=20, c2=.2, c3=2 * np.pi):
        super().__init__(n_var=n_var, n_obj=1, n_constr=0, xl=-32, xu=32, type_var=np.double)
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3 
Example #23
Source File: Fitters.py    From reliability with GNU Lesser General Public License v3.0
def logf(t, mu, sigma, gamma):  # Log PDF (3 parameter Lognormal)
        return anp.log(anp.exp(-0.5 * (((anp.log(t - gamma) - mu) / sigma) ** 2)) / ((t - gamma) * sigma * (2 * anp.pi) ** 0.5)) 
Example #24
Source File: Fitters.py    From reliability with GNU Lesser General Public License v3.0
def logf(t, mu, sigma):  # Log PDF (Lognormal)
        return anp.log(anp.exp(-0.5 * (((anp.log(t) - mu) / sigma) ** 2)) / (t * sigma * (2 * anp.pi) ** 0.5)) 
Example #25
Source File: Fitters.py    From reliability with GNU Lesser General Public License v3.0
def logf(t, mu, sigma):  # Log PDF (Normal)
        return anp.log(anp.exp(-0.5 * (((t - mu) / sigma) ** 2))) - anp.log((sigma * (2 * anp.pi) ** 0.5)) 
Example #26
Source File: density.py    From kernel-gof with MIT License
def lamb_sin(self, X):
        return np.prod(np.sin(self.w*np.pi*X),1) 
Example #27
Source File: density.py    From kernel-gof with MIT License
def __init__(self, w=1.0):
        """
        lambda_(X,Y) = sin(w*pi*X)+sin(w*pi*Y)
        """
        self.w = w 
Example #28
Source File: density.py    From kernel-gof with MIT License
def normal_density(mean, variance, X):
        """
        Exact density (not log density) of an isotropic Gaussian.
        mean: length-d array
        variance: scalar variances
        X: n x d 2d-array
        """
        Z = np.sqrt(2.0*np.pi*variance)
        unden = np.exp(old_div(-np.sum((X-mean)**2.0, 1),(2.0*variance)) )
        den = old_div(unden,Z)
        assert len(den) == X.shape[0]
        return den 
Example #29
Source File: data.py    From kernel-gof with MIT License
def __init__(self, w = 1.0):
        """
        2D spatial poission process with default lambda_(X,Y) = sin(w*pi*X)+sin(w*pi*Y)
        """
        self.w = w 
Example #30
Source File: data.py    From kernel-gof with MIT License
def sine_intensity(self, X):
        intensity = self.lamb_bar*np.sum(np.sin(self.w*X*np.pi),1)
        return intensity