Python autograd.numpy.newaxis Examples

The following are 30 code examples of autograd.numpy.newaxis. You can go to the original project or source file noted above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
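Note that np.newaxis is an alias for None rather than a callable: indexing an array with it inserts a new axis of length one, which is what nearly every example below relies on for broadcasting. A minimal illustration:

import autograd.numpy as np

x = np.array([1.0, 2.0, 3.0])   # shape (3,)
col = x[:, np.newaxis]          # shape (3, 1)
row = x[np.newaxis, :]          # shape (1, 3)
diff = col - row                # broadcasting gives the full (3, 3) outer difference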
Example #1
Source File: util.py    From kernel-gof with MIT License
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project 
    Z so that all points in Z live in the bounds.

    Z: m x d 
    Data: n x d

    Return a projected Z of size m x d.
    """
    m, d = Z.shape
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], m, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], m, axis=0)

    Z = np.maximum(LowMat, Z)
    Z = np.minimum(UpMat, Z)
    return Z 
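A minimal usage sketch of the function above (hypothetical data; assumes autograd.numpy is imported as np, as in kernel-gof's util.py):

import autograd.numpy as np

Data = np.array([[0.0, 0.0], [1.0, 2.0]])   # n x d reference data
Z = np.array([[-1.0, 5.0], [0.5, 1.0]])     # m x d points to project
Zp = bound_by_data(Z, Data)
# Zp == [[0.0, 2.0], [0.5, 1.0]]: each coordinate is clipped to [min, max] of Data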
Example #2
Source File: plot.py    From kernel-gof with MIT License
def ascii_table(self, tablefmt="pipe"):
        """
        Return an ASCII string representation of the table.

        tablefmt: a tabulate table format; "plain", "fancy_grid", "grid", "simple" might be useful.
        """
        methods = self.methods
        xvalues = self.xvalues
        plot_matrix = self.plot_matrix

        import tabulate
        # https://pypi.python.org/pypi/tabulate
        aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))
        return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)

# end of class PlotValues 
Example #3
Source File: kernel.py    From kernel-gof with MIT License
def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma

        if self.degree == 1:  # fast path; the general expression below is also valid for degree 1
            out = gamma * X[:, dim, np.newaxis]  # nx x 1
            return np.repeat(out, Y.shape[0], axis=1)

        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * X[:, dim, np.newaxis]) 
Example #4
Source File: kernel.py    From kernel-gof with MIT License
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma

        if self.degree == 1:  # fast path; the general expression below is also valid for degree 1
            out = gamma * Y[np.newaxis, :, dim]  # 1 x ny
            return np.repeat(out, X.shape[0], axis=0)

        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * Y[np.newaxis, :, dim]) 
Example #5
Source File: util.py    From momi2 with GNU General Public License v3.0
def truncate0(x, axis=None, strict=False, tol=1e-13):
    '''make sure everything in x is non-negative'''
    # the maximum along axis
    maxes = np.maximum(np.amax(x, axis=axis), 1e-300)
    # the negative part of minimum along axis
    mins = np.maximum(-np.amin(x, axis=axis), 0.0)

    # assert the negative numbers are small (relative to maxes)
    assert np.all(mins <= tol * maxes)

    if axis is not None:
        idx = [slice(None)] * x.ndim
        idx[axis] = np.newaxis
        # index with a tuple: list-based indexing here is an error in modern NumPy
        mins = mins[tuple(idx)]
        maxes = maxes[tuple(idx)]

    if strict:
        # set everything below the tolerance to 0
        return set0(x, x < tol * maxes)
    else:
        # set everything of same magnitude as most negative number, to 0
        return set0(x, x < 2 * mins) 
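`set0` is defined elsewhere in momi2's util.py and is not shown here. A minimal stand-in consistent with how it is used above (zero out entries where the mask is True) might look like the following; momi2's actual implementation may differ, e.g. to stay autograd-compatible:

def set0(x, mask):
    # Hypothetical stand-in: return a copy of x with masked entries set to 0.
    out = np.array(x, copy=True)
    out[mask] = 0.0
    return out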
Example #6
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at each pair (x, y), with x a row of X and y a row of Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        d = X.shape[1]
        sumx2 = np.sum(X**2, axis=1)[:, np.newaxis]
        sumy2 = np.sum(Y**2, axis=1)[np.newaxis, :]
        D2 = sumx2 - 2 * np.dot(X, Y.T) + sumy2
        s = (D2[np.newaxis, :, :] / self.sigma2s[:, np.newaxis, np.newaxis])
        return np.einsum('w,wij,wij->ij',
                         self.wts / self.sigma2s, np.exp(s / -2), d - s) 
Example #7
Source File: kernel.py    From kernel-gof with MIT License
def eval(self, X, Y):
        """
        Evaluate the kernel on data X and Y
        X: nx x d where each row represents one point
        Y: ny x d
        return nx x ny Gram matrix
        """
        sumx2 = np.sum(X**2, axis=1)[:, np.newaxis]
        sumy2 = np.sum(Y**2, axis=1)[np.newaxis, :]
        D2 = sumx2 - 2 * np.dot(X, Y.T) + sumy2
        return np.tensordot(
            self.wts,
            np.exp(
                D2[np.newaxis, :, :]
                / (-2 * self.sigma2s[:, np.newaxis, np.newaxis])),
            1) 
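This example and the previous one both build the pairwise squared-distance matrix via the broadcasting identity ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2. A quick sanity check of that trick (array names here are illustrative):

import autograd.numpy as np

X = np.random.randn(4, 3)
Y = np.random.randn(5, 3)
D2 = np.sum(X**2, axis=1)[:, np.newaxis] - 2 * np.dot(X, Y.T) + np.sum(Y**2, axis=1)[np.newaxis, :]
# naive double loop for comparison
D2_loop = np.array([[np.sum((x - y)**2) for y in Y] for x in X])
assert np.allclose(D2, D2_loop)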
Example #8
Source File: kernel.py    From kernel-gof with MIT License
def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, y)}{\partial x_i \partial y_i}
        evaluated at each pair (x, y), with x a row of X and y a row of Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert d1 == d2, 'Dimensions of the two inputs must be the same'
        d = d1
        sigma2 = self.sigma2
        D2 = np.sum(X**2, 1)[:, np.newaxis] - 2*np.dot(X, Y.T) + np.sum(Y**2, 1)
        K = np.exp(-D2 / (2.0 * sigma2))
        G = K / sigma2 * (d - D2 / sigma2)
        return G
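Since this page is about autograd.numpy, the closed form above can be sanity-checked against autograd's own derivatives for a single pair of points. A sketch under the same Gaussian-kernel assumption (sigma2 chosen arbitrarily):

import autograd.numpy as np
from autograd import grad, jacobian

sigma2 = 1.5
k = lambda x, y: np.exp(-np.sum((x - y)**2) / (2.0 * sigma2))

x, y = np.random.randn(3), np.random.randn(3)
cross = jacobian(grad(k, 0), 1)(x, y)          # d x d matrix of d^2 k / dx_i dy_j
analytic = k(x, y) / sigma2 * (3 - np.sum((x - y)**2) / sigma2)
assert np.allclose(np.trace(cross), analytic)  # matches G above for nx = ny = 1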
Example #9
Source File: kernel.py    From kernel-gof with MIT License
def pair_gradX_Y(self, X, Y):
        """
        Compute the gradient with respect to X in k(X, Y), evaluated at the
        specified X and Y.

        X: n x d
        Y: n x d

        Return a numpy array of size n x d
        """
        sigma2 = self.sigma2
        Kvec = self.pair_eval(X, Y)
        # n x d
        Diff = X - Y
        G = -Kvec[:, np.newaxis]*Diff/sigma2
        return G 
Example #10
Source File: kernel.py    From kernel-gof with MIT License
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        D2 = util.dist2_matrix(X, Y)
        # 1d array of length nx
        Xi = X[:, dim]
        # 1d array of length ny
        Yi = Y[:, dim]
        # nx x ny
        dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]

        b = self.b
        c = self.c
        Gdim = 2.0 * b * (c**2 + D2)**(b - 1) * dim_diff
        assert Gdim.shape[0] == X.shape[0]
        assert Gdim.shape[1] == Y.shape[0]
        return Gdim 
Example #11
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            X = self.nonhom_sine(size=n)
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X)

# end class DSISIPoissonSine 
Example #12
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            mvn = stats.multivariate_normal(self.mean, self.cov)
            X = mvn.rvs(size=n)
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X) 
Example #13
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            X = stats.gamma.rvs(self.alpha, size=n, scale=1.0 / self.beta)
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X)

# end class DSGamma 
Example #14
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            X = np.log(stats.gamma.rvs(self.alpha, size=n, scale=1.0 / self.beta))
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X)

# end class DSLogGamma 
Example #15
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            X = np.log(self.nonhom_linear(size=n))
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X)

# end class DSISILogPoissonLinear 
Example #16
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            X = self.inh2d(lamb_bar=n)
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X)

# end class DSISIPoisson2D 
Example #17
Source File: data.py    From kernel-gof with MIT License
def sample(self, n, seed=3):
        with util.NumpySeedContext(seed=seed):
            X_gmm, llh = self.gmm_sample(N=n)
            X = X_gmm
            if len(X.shape) == 1:
                # This can happen if d=1
                X = X[:, np.newaxis]
            return Data(X)

# end class DSPoisson2D 
Example #18
Source File: kernel.py    From kernel-gof with MIT License
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        diffs = -X[:, [dim]] + Y[:, [dim]].T
        exps = np.exp(diffs[np.newaxis, :, :] ** 2
                      / (-2 * self.sigma2s[:, np.newaxis, np.newaxis]))
        return np.einsum('w,wij,ij->ij', self.wts / self.sigma2s, exps, diffs) 
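The einsum contracts out the mixture-component axis w, i.e. it weights each component's term by wts[w] / sigma2s[w] and sums. An equivalent (slower) loop form with illustrative values, just to make the contraction explicit:

import autograd.numpy as np

wts = np.array([0.5, 0.5])
sigma2s = np.array([1.0, 4.0])
diffs = np.random.randn(3, 2)
exps = np.exp(diffs[np.newaxis, :, :] ** 2 / (-2 * sigma2s[:, np.newaxis, np.newaxis]))

out_einsum = np.einsum('w,wij,ij->ij', wts / sigma2s, exps, diffs)
out_loop = sum((wts[w] / sigma2s[w]) * exps[w] * diffs for w in range(len(wts)))
assert np.allclose(out_einsum, out_loop)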
Example #19
Source File: rnn.py    From MLAlgorithms with MIT License
def backward_pass(self, delta):
        if len(delta.shape) == 2:
            delta = delta[:, np.newaxis, :]
        n_samples, n_timesteps, input_shape = delta.shape
        p = self._params

        # Temporal gradient arrays
        grad = {k: np.zeros_like(p[k]) for k in p.keys()}

        dh_next = np.zeros((n_samples, input_shape))
        output = np.zeros((n_samples, n_timesteps, self.input_dim))

        # Backpropagation through time
        for i in reversed(range(n_timesteps)):
            dhi = self.activation_d(self.states[:, i, :]) * (delta[:, i, :] + dh_next)

            grad["W"] += np.dot(self.last_input[:, i, :].T, dhi)
            grad["b"] += delta[:, i, :].sum(axis=0)
            grad["U"] += np.dot(self.states[:, i - 1, :].T, dhi)

            dh_next = np.dot(dhi, p["U"].T)

            # Gradient with respect to the layer input at step i
            output[:, i, :] = np.dot(dhi, p["W"].T)

        # Change actual gradient arrays
        for k in grad.keys():
            self._params.update_grad(k, grad[k])
        return output 
Example #20
Source File: basic.py    From MLAlgorithms with MIT License
def backward_pass(self, delta):
        return np.repeat(delta[:, np.newaxis, :], 2, 1) 
Example #21
Source File: train.py    From tree-regularization-public with MIT License
def objective(self, W, X, F, y):
        path_length = self.mlp.pred_fun(self.mlp.weights, 
                                        W[:self.gru.num_weights][:, np.newaxis]).ravel()[0]
        return -self.gru.loglike_fun(W, X, F, y) + self.strength * path_length 
Example #22
Source File: wavelet.py    From scarlet with MIT License
def image(self, image):
        """Updates the coefficients if the image is changed"""
        if len(image.shape) == 2:
            self._image = image[np.newaxis, :, :]
        else:
            self._image = image
        if self._direct:
            self._coeffs = self.direct_transform()
        else:
            self._coeffs = self.transform() 
Example #23
Source File: wavelet.py    From scarlet with MIT License
def coefficients(self, coeffs):
        """Updates the image if the coefficients are changed"""
        if len(np.shape(coeffs)) == 3:
            coeffs = coeffs[np.newaxis, :, :, :]
        self._coeffs = coeffs
        rec = []
        for star in self._coeffs:
            rec.append(iuwt(star))
        self._image = np.array(rec) 
Example #24
Source File: wavelet.py    From scarlet with MIT License
def transform(self):
        """ Performs the wavelet transform of an image by convolution with the seed wavelet

        The seed wavelet is the starlet transform of a Dirac impulse. Once computed
        for a given shape, the seed is cached and reused for images with the same shape.
        The transform is applied to `self._image`.

        Returns
        -------
        starlet: numpy ndarray
            the starlet transform of the Starlet object's image
        """
        try:
            # Check whether a cached starlet seed exists
            seed_fft = Cache.check('Starlet', tuple(self._starlet_shape))
        except KeyError:
            # make a starlet seed
            self.seed = mk_starlet(self._starlet_shape)
            # Take its fft
            seed_fft = fft.Fourier(self.seed)
            seed_fft.fft(self._starlet_shape[-2:], (-2,-1))
            # Cache the fft
            Cache.set('Starlet', tuple(self._starlet_shape), seed_fft)
        coefficients = []
        for im in self._image:
            coefficients.append(fft.convolve(seed_fft, fft.Fourier(im[np.newaxis, :, :]), axes=(-2, -1)).image)
        return np.array(coefficients) 
Example #25
Source File: Utilities.py    From ParametricGP with MIT License
def kernel(X, Xp, hyp):
    output_scale = np.exp(hyp[0])
    lengthscales = np.sqrt(np.exp(hyp[1:]))
    X = X / lengthscales
    Xp = Xp / lengthscales
    X_SumSquare = np.sum(np.square(X), axis=1)
    Xp_SumSquare = np.sum(np.square(Xp), axis=1)
    mul = np.dot(X, Xp.T)
    dists = X_SumSquare[:, np.newaxis] + Xp_SumSquare - 2.0 * mul
    return output_scale * np.exp(-0.5 * dists)
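A usage sketch (values illustrative): hyp[0] is the log output scale and hyp[1:] parameterize the per-dimension lengthscales, so for d-dimensional inputs hyp needs d + 1 entries.

import autograd.numpy as np

X = np.random.randn(4, 2)
Xp = np.random.randn(5, 2)
hyp = np.zeros(3)           # unit output scale and unit lengthscales
K = kernel(X, Xp, hyp)
assert K.shape == (4, 5)    # one covariance entry per (X, Xp) pair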
Example #26
Source File: test_gaussian.py    From bayesian-coresets with MIT License
def weighted_post(th0, Sig0inv, Siginv, x, w): 
  Sigp = np.linalg.inv(Sig0inv + w.sum()*Siginv)
  mup = np.dot(Sigp,  np.dot(Sig0inv,th0) + np.dot(Siginv, (w[:, np.newaxis]*x).sum(axis=0)))
  return mup, Sigp 
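For reference, these two lines implement the standard weighted Gaussian conjugate update: with prior N(th0, Sig0) and observations x_i ~ N(th, Sig), each weighted by w_i, the posterior covariance is Sigp = (Sig0inv + (sum_i w_i) * Siginv)^{-1} and the posterior mean is mup = Sigp @ (Sig0inv @ th0 + Siginv @ sum_i w_i * x_i).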
Example #27
Source File: test_gaussian.py    From bayesian-coresets with MIT License
def ll_m2_exact(muw, Sigw, Siginv, x):
  L = np.linalg.cholesky(Siginv)
  Rho = np.dot(np.dot(L.T, Sigw), L)

  crho = 2*(Rho**2).sum() + (np.diag(Rho)*np.diag(Rho)[:, np.newaxis]).sum()

  mu = np.dot(L.T, (x - muw).T).T
  musq = (mu**2).sum(axis=1)

  return 0.25*(crho + musq*musq[:, np.newaxis] + np.diag(Rho).sum()*(musq + musq[:,np.newaxis]) + 4*np.dot(np.dot(mu, Rho), mu.T))

# Var[log N(x; mu, Sig)] under mu ~ N(muw, Sigw)
Example #28
Source File: test_gaussian.py    From bayesian-coresets with MIT License
def ll_m2_exact_diag(muw, Sigw, Siginv, x):
  L = np.linalg.cholesky(Siginv)
  Rho = np.dot(np.dot(L.T, Sigw), L)

  crho = 2*(Rho**2).sum() + (np.diag(Rho)*np.diag(Rho)[:, np.newaxis]).sum()

  mu = np.dot(L.T, (x - muw).T).T
  musq = (mu**2).sum(axis=1)

  return 0.25*(crho + musq**2 + 2*np.diag(Rho).sum()*musq + 4*(np.dot(mu, Rho)*mu).sum(axis=1)) 
Example #29
Source File: methods.py    From tf-quant-finance with Apache License 2.0
def taylor_approx(target, stencil, values):
  """Use taylor series to approximate up to second order derivatives.

  Args:
    target: An array of shape (..., n), a batch of n-dimensional points
      where one wants to approximate function value and derivatives.
    stencil: An array of shape broadcastable to (..., k, n), for each target
      point a set of k = triangle(n + 1) points to use on its approximation.
    values: An array of shape broadcastable to (..., k), the function value at
      each of the stencil points.

  Returns:
    An array of shape (..., k), for each target point the approximated
    function value, gradient and hessian evaluated at that point (flattened
    and in the same order as returned by derivative_names).
  """
  # Broadcast arrays to their required shape.
  batch_shape, ndim = target.shape[:-1], target.shape[-1]
  stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))
  values = np.broadcast_to(values, stencil.shape[:-1])

  # Subtract target from each stencil point.
  delta_x = stencil - np.expand_dims(target, axis=-2)
  delta_xy = np.matmul(
      np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))
  i = np.arange(ndim)
  j, k = np.triu_indices(ndim, k=1)

  # Build coefficients for the Taylor series equations, namely:
  #   f(stencil) = coeffs @ [f(target), df/d0(target), ...]
  coeffs = np.concatenate([
      np.ones(delta_x.shape[:-1] + (1,)),  # f(target)
      delta_x,  # df/di(target)
      delta_xy[..., i, i] / 2,  # d^2f/di^2(target)
      delta_xy[..., j, k],  # d^2f/{dj dk}(target)
  ], axis=-1)

  # Then: [f(target), df/d0(target), ...] = coeffs^{-1} @ f(stencil)
  return np.squeeze(
      np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1) 
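`triangular` is defined elsewhere in the module. A stand-in consistent with the docstring, where k = triangular(n + 1) counts one function value, n first derivatives, and n(n + 1)/2 second derivatives, would be:

def triangular(m):
    # Hypothetical helper: the m-th triangular number, m * (m + 1) / 2.
    return m * (m + 1) // 2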
Example #30
Source File: wing.py    From autograd with MIT License
def plot_matrix(ax, r, g, b, t, render=False):
    if ax:
        plt.cla()
        ax.imshow(np.concatenate((r[..., np.newaxis], g[..., np.newaxis], b[..., np.newaxis]), axis=2))
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        if render:
            plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
        plt.pause(0.001)