Python autograd.numpy.zeros() Examples

The following are 30 code examples of autograd.numpy.zeros(), drawn from open source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
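As a quick orientation before the project examples, here is a minimal sketch of the usual pattern: import autograd's NumPy wrapper, build arrays with autograd.numpy.zeros(), and differentiate with autograd.grad. The function and values below are illustrative only and are not taken from any of the projects listed; note that autograd tracks functional operations, so the sketch avoids in-place assignment into the traced array.

import autograd.numpy as np
from autograd import grad

def pad_and_sum(x):
    # one zero cell of padding on each side, built functionally (no in-place writes)
    pad = np.zeros(1)
    buf = np.concatenate([pad, x, pad])
    return np.sum(buf ** 2)

g = grad(pad_and_sum)(np.array([1.0, 2.0, 3.0]))   # -> array([2., 4., 6.])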
Example #1
Source File: test_tuple.py    From autograd with MIT License
def test_getter():
    def fun(input_tuple):
        A = np.sum(input_tuple[0])
        B = np.sum(input_tuple[1])
        C = np.sum(input_tuple[1])
        return A + B + C

    d_fun = grad(fun)
    input_tuple = (npr.randn(5, 6),
                   npr.randn(4, 3),
                   npr.randn(2, 4))

    result = d_fun(input_tuple)
    assert np.allclose(result[0], np.ones((5, 6)))
    assert np.allclose(result[1], 2 * np.ones((4, 3)))
    assert np.allclose(result[2], np.zeros((2, 4))) 
Example #2
Source File: sources.py    From ceviche with MIT License
def compute_f(theta, lambda0, dL, shape):
    """ Compute the 'vacuum' field vector """

    # get plane wave k vector components (in units of grid cells)
    k0 = 2 * npa.pi / lambda0 * dL
    kx =  k0 * npa.sin(theta)
    ky = -k0 * npa.cos(theta)  # negative because downwards

    # array to write into
    f_src = npa.zeros(shape, dtype=npa.complex128)

    # get coordinates
    Nx, Ny = shape
    xpoints = npa.arange(Nx)
    ypoints = npa.arange(Ny)
    xv, yv = npa.meshgrid(xpoints, ypoints, indexing='ij')

    # compute values and insert into array
    x_PW = npa.exp(1j * xpoints * kx)[:, None]
    y_PW = npa.exp(1j * ypoints * ky)[:, None]

    f_src[xv, yv] = npa.outer(x_PW, y_PW)

    return f_src.flatten() 
Example #3
Source File: primitives.py    From ceviche with MIT License
def grad_spsp_mult_entries_a_forward(g, b_out, entries_a, indices_a, entries_x, indices_x, N):
    """ Forward mode is not much better than reverse mode, but the same general logic aoplies:
        Convert to matrix form, do the calculation, convert back to entries.        
            dA/de @ x @ g
    """

    # get the IO indices matrices for B
    _, indices_b = b_out
    Mb = indices_b.shape[1]
    Ib, Ob = make_IO_matrices(indices_b, N)

    # multiply g by X using a's index
    entries_gX, indices_gX = spsp_mult(g, indices_a, entries_x, indices_x, N)
    gX = make_sparse(entries_gX, indices_gX, shape=(N, N))

    # convert these entries and indices into the basis of the indices of B
    M = (Ib.T).dot(gX).dot(Ob.T)

    # return the diagonal (resulting entries) and indices of 0 (because indices are not affected by entries)
    return M.diagonal(), npa.zeros(Mb) 
Example #4
Source File: demography.py    From momi2 with GNU General Public License v3.0
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T

    for v in treeseq.variants():
        yield get_config(v.genotypes) 
Example #5
Source File: utils.py    From ceviche with MIT License
def jac_num(fn, arg, step_size=1e-7):
    """ DEPRICATED: use 'numerical' in jacobians.py instead
    numerically differentiate `fn` w.r.t. its argument `arg` 
    `arg` can be a numpy array of arbitrary shape
    `step_size` can be a number or an array of the same shape as `arg` """

    in_array = float_2_array(arg).flatten()
    out_array = float_2_array(fn(arg)).flatten()

    m = in_array.size
    n = out_array.size
    shape = (m, n)
    jacobian = np.zeros(shape)

    for i in range(m):
        input_i = in_array.copy()
        input_i[i] += step_size
        arg_i = input_i.reshape(in_array.shape)
        output_i = fn(arg_i).flatten()
        grad_i = (output_i - out_array) / step_size
        jacobian[i, :] = get_value(grad_i)

    return jacobian 
Example #6
Source File: component.py    From scarlet with MIT License
def _grad_add_models(upstream_grad, *models, full_model, slices, index):
    """Gradient for a single model

    The full model is just the sum of the models,
    so the gradient is 1 for each model,
    we just have to slice it appropriately.
    """
    model = models[index]
    full_model_slices = slices[index][0]
    model_slices = slices[index][1]

    def result(upstream_grad):
        _result = np.zeros(model.shape, dtype=model.dtype)
        _result[model_slices] = upstream_grad[full_model_slices]
        return _result
    return result 
Example #7
Source File: test_graphs.py    From autograd with MIT License
def test_assignment_raises_error():
    def fun(A, b):
        A[1] = b
        return A
    A = npr.randn(5)
    with pytest.raises(TypeError):
        check_grads(fun)(A, 3.0)

# def test_nonscalar_output_1():
#     with pytest.raises(TypeError):
#         grad(lambda x: x * 2)(np.zeros(2))

# def test_nonscalar_output_2():
#     with pytest.raises(TypeError):
#         grad(lambda x: x * 2)(np.zeros(2))

# TODO:
# Diamond patterns
# Taking grad again after returning const
# Empty functions
# 2nd derivatives with fanout, thinking about the outgrad adder 
Example #8
Source File: utils.py    From ceviche with MIT License
def measure_fields(F, source, steps, probes, component='Ez'):
    """ Returns a time series of the measured `component` fields from FDFD `F`
        driven by `source` and measured at `probes`.
    """
    F.initialize_fields()
    if not isinstance(probes, list):
        probes = [probes]
    N_probes = len(probes)
    measured = np.zeros((steps, N_probes))
    for t_index in range(steps):
        if t_index % (steps//20) == 0:
            print('{:.2f} % done'.format(float(t_index)/steps*100.0))
        fields = F.forward(Jz=source(t_index))
        for probe_index, probe in enumerate(probes):
            field_probe = np.sum(fields[component] * probe)
            measured[t_index, probe_index] = field_probe
    return measured 
Example #9
Source File: lgss_example.py    From variational-smc with MIT License
def init_model_params(Dx, Dy, alpha, r, obs, rs = npr.RandomState(0)):
    mu0 = np.zeros(Dx)
    Sigma0 = np.eye(Dx)
    
    A = np.zeros((Dx,Dx))
    for i in range(Dx):
        for j in range(Dx):
            A[i,j] = alpha**(abs(i-j)+1)
            
    Q = np.eye(Dx)
    C = np.zeros((Dy,Dx))
    if obs == 'sparse':
        C[:Dy,:Dy] = np.eye(Dy)
    else:
        C = rs.normal(size=(Dy,Dx))
    R = r * np.eye(Dy)
    
    return (mu0, Sigma0, A, Q, C, R) 
Example #10
Source File: observation.py    From scarlet with MIT License
def get_loss(self, model):
        """Computes the loss/fidelity of a given model wrt to the observation
        Parameters
        ----------
        model: array
            A model from `Blend`
        Returns
        -------
        loss: float
            Loss of the model
        """
        model_ = self.render(model)
        images_ = self.images
        weights_ = self.weights

        # properly normalized likelihood
        log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        log_norm = (
                np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                + np.sum(log_sigma) / 2
        )

        return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2) 
Example #11
Source File: observation.py    From scarlet with MIT License
def log_norm(self):
        try:
            return self._log_norm
        except AttributeError:
            if self.frame != self.model_frame:
                images_ = self.images[self.slices_for_images]
                weights_ = self.weights[self.slices_for_images]
            else:
                images_ = self.images
                weights_ = self.weights

            # normalization of the single-pixel likelihood:
            # 1 / [(2pi)^1/2 (sigma^2)^1/2]
            # with inverse variance weights: sigma^2 = 1/weight
            # full likelihood is sum over all data samples: pixel in images
            # NOTE: this assumes that all pixels are used in likelihood!
            log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
            cuts = weights_ > 0
            log_sigma[cuts] = np.log(1 / weights_[cuts])
            self._log_norm = (
                    np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                    + np.sum(log_sigma) / 2
            )
        return self._log_norm 
Example #12
Source File: lgss_example.py    From variational-smc with MIT License
def generate_data(model_params, T = 5, rs = npr.RandomState(0)):
    mu0, Sigma0, A, Q, C, R = model_params
    Dx = mu0.shape[0]
    Dy = R.shape[0]
    
    x_true = np.zeros((T,Dx))
    y_true = np.zeros((T,Dy))

    for t in range(T):
        if t > 0:
            x_true[t,:] = rs.multivariate_normal(np.dot(A,x_true[t-1,:]),Q)
        else:
            x_true[0,:] = rs.multivariate_normal(mu0,Sigma0)
        y_true[t,:] = rs.multivariate_normal(np.dot(C,x_true[t,:]),R)
        
    return x_true, y_true 
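A hedged usage sketch for the two lgss_example.py helpers above (Examples #9 and #12): the dimensions and constants are illustrative, not taken from the project, and the sketch assumes both functions are in scope.

import autograd.numpy as np
import autograd.numpy.random as npr

Dx, Dy, alpha, r = 2, 2, 0.42, 0.1                      # illustrative values
params = init_model_params(Dx, Dy, alpha, r, obs='sparse', rs=npr.RandomState(0))
x_true, y_true = generate_data(params, T=10, rs=npr.RandomState(1))
print(x_true.shape, y_true.shape)                       # (10, 2) (10, 2)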
Example #13
Source File: fft.py    From scarlet with MIT License
def fast_zero_pad(arr, pad_width):
    """Fast version of numpy.pad when `mode="constant"`

    Executing `numpy.pad` with zeros is ~1000 times slower
    because it doesn't make use of the `zeros` method for padding.

    Parameters
    ----------
    arr: array
        The array to pad
    pad_width: tuple
        Number of values padded to the edges of each axis.
        See numpy docs for more.

    Returns
    -------
    result: array
        The array padded with `constant_values`
    """
    newshape = tuple([a+ps[0]+ps[1] for a, ps in zip(arr.shape, pad_width)])

    result = np.zeros(newshape, dtype=arr.dtype)
    slices = tuple([slice(start, s-end) for s, (start, end) in zip(result.shape, pad_width)])
    result[slices] = arr
    return result 
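A small usage check (not from scarlet) for fast_zero_pad above, comparing it against numpy.pad with constant zeros; it assumes the function is in scope.

import numpy as onp   # plain numpy, just for the reference result

arr = onp.arange(6.0).reshape(2, 3)
pad_width = ((1, 2), (0, 1))          # (before, after) padding per axis
assert onp.array_equal(fast_zero_pad(arr, pad_width),
                       onp.pad(arr, pad_width, mode="constant"))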
Example #14
Source File: test_builtins.py    From autograd with MIT License
def test_isinstance():
  def checker(ex, type_, truthval):
    assert isinstance(ex, type_) == truthval
    return 1.

  examples = [
      [list,          [[]],          [()]],
      [np.ndarray,    [np.zeros(1)], [[]]],
      [(tuple, list), [[], ()],      [np.zeros(1)]],
  ]

  for type_, positive_examples, negative_examples in examples:
    for ex in positive_examples:
      checker(ex, type_, True)
      grad(checker)(ex, type_, True)

    for ex in negative_examples:
      checker(ex, type_, False)
      grad(checker)(ex, type_, False) 
Example #15
Source File: wing.py    From autograd with MIT License
def project(vx, vy, occlusion):
    """Project the velocity field to be approximately mass-conserving,
       using a few iterations of Gauss-Seidel."""
    p = np.zeros(vx.shape)
    div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
                + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
    div = make_continuous(div, occlusion)

    for k in range(50):
        p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
                 + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
        p = make_continuous(p, occlusion)

    vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
    vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))

    vx = occlude(vx, occlusion)
    vy = occlude(vy, occlusion)
    return vx, vy 
Example #16
Source File: ctp.py    From pymoo with Apache License 2.0
def __init__(self, n_var=2, n_constr=2, **kwargs):
        super().__init__(n_var, n_constr, **kwargs)

        a, b = anp.zeros(n_constr + 1), anp.zeros(n_constr + 1)
        a[0], b[0] = 1, 1
        delta = 1 / (n_constr + 1)
        alpha = delta

        for j in range(n_constr):
            beta = a[j] * anp.exp(-b[j] * alpha)
            a[j + 1] = (a[j] + beta) / 2
            b[j + 1] = - 1 / alpha * anp.log(beta / a[j + 1])

            alpha += delta

        self.a = a[1:]
        self.b = b[1:] 
Example #17
Source File: test_dict.py    From autograd with MIT License
def test_getter():
    def fun(input_dict):
        A = np.sum(input_dict['item_1'])
        B = np.sum(input_dict['item_2'])
        C = np.sum(input_dict['item_2'])
        return A + B + C

    d_fun = grad(fun)
    input_dict = {'item_1' : npr.randn(5, 6),
                  'item_2' : npr.randn(4, 3),
                  'item_X' : npr.randn(2, 4)}

    result = d_fun(input_dict)
    assert np.allclose(result['item_1'], np.ones((5, 6)))
    assert np.allclose(result['item_2'], 2 * np.ones((4, 3)))
    assert np.allclose(result['item_X'], np.zeros((2, 4))) 
Example #18
Source File: util.py    From kernel-gof with MIT License
def one_of_K_code(arr):
    """
    Make a one-of-K coding out of the numpy array.
    For example, if arr = ([0, 1, 0, 2]), then return a 2d array of the form 
     [[1, 0, 0], 
      [0, 1, 0],
      [1, 0, 0],
      [0, 0, 1]]
    """
    U = np.unique(arr)
    n = len(arr)
    nu = len(U)
    X = np.zeros((n, nu))
    for i, u in enumerate(U):
        Ii = np.where( np.abs(arr - u) < 1e-8 )
        #ni = len(Ii)
        X[Ii[0], i] = 1
    return X 
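A quick usage sketch matching the docstring example above (assuming numpy is imported as np, as in the module):

print(one_of_K_code(np.array([0, 1, 0, 2])))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]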
Example #19
Source File: train.py    From tree-regularization-public with MIT License
def train(self, X_train, F_train, y_train, batch_size=32, num_iters=1000, 
              lr=1e-3, param_scale=0.01, log_every=100, init_weights=None):
        grad_fun = build_batched_grad_fences(grad(self.objective), batch_size, 
                                             X_train, F_train, y_train)
        if init_weights is None:
            init_weights = self.init_weights(param_scale)
        saved_weights = np.zeros((num_iters, self.num_weights))

        def callback(weights, i, gradients):
            apl = self.average_path_length(weights, X_train, F_train, y_train)
            saved_weights[i, :] = weights
            loss_train = self.objective(weights, X_train, F_train, y_train)
            if i % log_every == 0: 
                print('model: gru | iter: {} | loss: {:.2f} | apl: {:.2f}'.format(i, loss_train, apl))

        optimized_weights = adam(grad_fun, init_weights, num_iters=num_iters, 
                                 step_size=lr, callback=callback)
        self.saved_weights = saved_weights
        self.weights = optimized_weights
        return optimized_weights 
Example #20
Source File: jacobians.py    From ceviche with MIT License
def jacobian_numerical(fn, x, step_size=1e-7):
    """ numerically differentiate `fn` w.r.t. its argument `x` """
    in_array = float_2_array(x).flatten()
    out_array = float_2_array(fn(x)).flatten()

    m = in_array.size
    n = out_array.size
    shape = (n, m)
    jacobian = npa.zeros(shape)

    for i in range(m):
        input_i = in_array.copy()
        input_i[i] += step_size
        arg_i = input_i.reshape(in_array.shape)
        output_i = fn(arg_i).flatten()
        grad_i = (output_i - out_array) / step_size
        jacobian[:, i] = get_value_arr(get_value(grad_i))  # need to convert both the grad_i array and its contents to actual data.

    return jacobian 
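A hedged usage sketch (not from ceviche) for jacobian_numerical above; it relies on ceviche's own helpers (float_2_array, get_value, get_value_arr) being importable alongside the function, and checks the finite-difference Jacobian of a simple two-output map.

import autograd.numpy as npa

def fn(x):
    return npa.array([npa.sum(x ** 2), npa.prod(x)])

x0 = npa.array([1.0, 2.0, 3.0])
J = jacobian_numerical(fn, x0)        # shape (2, 3): one row per output
# rows approximate 2*x0 = [2, 4, 6] and [x1*x2, x0*x2, x0*x1] = [6, 3, 2]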
Example #21
Source File: optimizers.py    From autograd with MIT License
def adam(grad, x, callback=None, num_iters=100,
         step_size=0.001, b1=0.9, b2=0.999, eps=10**-8):
    """Adam as described in http://arxiv.org/pdf/1412.6980.pdf.
    It's basically RMSprop with momentum and some correction terms."""
    m = np.zeros(len(x))
    v = np.zeros(len(x))
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        m = (1 - b1) * g      + b1 * m  # First  moment estimate.
        v = (1 - b2) * (g**2) + b2 * v  # Second moment estimate.
        mhat = m / (1 - b1**(i + 1))    # Bias correction.
        vhat = v / (1 - b2**(i + 1))
        x = x - step_size*mhat/(np.sqrt(vhat) + eps)
    return x 
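A minimal hedged usage sketch for the adam routine above; the quadratic objective is illustrative, and note that the gradient function must accept (x, i).

import autograd.numpy as np
from autograd import grad

target = np.array([3.0, -1.0, 2.0])
objective = lambda x, i: np.sum((x - target) ** 2)
x_opt = adam(grad(objective), np.zeros(3), num_iters=1000, step_size=0.1)
# x_opt should end up close to target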
Example #22
Source File: optimizers.py    From autograd with MIT License
def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
    """Stochastic gradient descent with momentum.
    grad() must have signature grad(x, i), where i is the iteration number."""
    velocity = np.zeros(len(x))
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        velocity = mass * velocity - (1.0 - mass) * g
        x = x + step_size * velocity
    return x 
Example #23
Source File: bench_mem.py    From autograd with MIT License
def peakmem_needless_nodes():
    N, M = 1000, 100
    def fun(x):
        for i in range(M):
            x = x + 1
        return np.sum(x)

    grad(fun)(np.zeros((N, N))) 
Example #24
Source File: wing.py    From autograd with MIT License
def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
    occlusion = sigmoid(occlusion)

    # Disallow occlusion outside a certain area.
    mask = np.zeros((rows, cols))
    mask[10:30, 10:30] = 1.0
    occlusion = occlusion * mask

    # Initialize smoke bands.
    red_smoke = np.zeros((rows, cols))
    red_smoke[rows//4:rows//2] = 1
    blue_smoke = np.zeros((rows, cols))
    blue_smoke[rows//2:3*rows//4] = 1

    print("Running simulation...")
    vx, vy = project(vx, vy, occlusion)
    for t in range(num_time_steps):
        plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
        vx_updated = advect(vx, vx, vy)
        vy_updated = advect(vy, vx, vy)
        vx, vy = project(vx_updated, vy_updated, occlusion)
        red_smoke = advect(red_smoke, vx, vy)
        red_smoke = occlude(red_smoke, occlusion)
        blue_smoke = advect(blue_smoke, vx, vy)
        blue_smoke = occlude(blue_smoke, occlusion)
    plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
    return vx, vy 
Example #25
Source File: bayesian_optimization.py    From autograd with MIT License
def init_covariance_params(num_params):
    return np.zeros(num_params) 
Example #26
Source File: gmm.py    From autograd with MIT License
def init_gmm_params(num_components, D, scale, rs=npr.RandomState(0)):
    return {'log proportions': rs.randn(num_components) * scale,
            'means':           rs.randn(num_components, D) * scale,
            'lower triangles': np.zeros((num_components, D, D)) + np.eye(D)} 
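A quick usage sketch for init_gmm_params above (component count and dimension are illustrative, and the function is assumed to be in scope):

params = init_gmm_params(num_components=3, D=2, scale=0.1)
print(params['lower triangles'].shape)   # (3, 2, 2), each slice initialized to the identity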
Example #27
Source File: linear_model.py    From scikit-lego with MIT License
def fit(self, X, y):
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        if self.effect not in self.allowed_effects:
            raise ValueError(f"effect {self.effect} must be in {self.allowed_effects}")

        def deadzone(errors):
            if self.effect == "linear":
                return np.where(errors > self.threshold, errors, np.zeros(errors.shape))
            if self.effect == "quadratic":
                return np.where(
                    errors > self.threshold, errors ** 2, np.zeros(errors.shape)
                )

        def training_loss(weights):
            diff = np.abs(np.dot(X, weights) - y)
            if self.relative:
                diff = diff / y
            return np.mean(deadzone(diff))

        n, k = X.shape

        # Build a function that returns gradients of training loss using autograd.
        training_gradient_fun = grad(training_loss)

        # Check the gradients numerically, just to be safe.
        weights = np.random.normal(0, 1, k)
        if self.check_grad:
            check_grads(training_loss, modes=["rev"])(weights)

        # Optimize weights using gradient descent.
        self.loss_log_ = np.zeros(self.n_iter)
        self.wts_log_ = np.zeros((self.n_iter, k))
        self.deriv_log_ = np.zeros((self.n_iter, k))
        for i in range(self.n_iter):
            weights -= training_gradient_fun(weights) * self.stepsize
            self.wts_log_[i, :] = weights.ravel()
            self.loss_log_[i] = training_loss(weights)
            self.deriv_log_[i, :] = training_gradient_fun(weights).ravel()
        self.coefs_ = weights
        return self 
Example #28
Source File: test_scipy.py    From autograd with MIT License
def test_mvn_pdf_sing_cov(): combo_check(mvn.pdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True]) 
Example #29
Source File: test_scipy.py    From autograd with MIT License
def test_mvn_logpdf_sing_cov(): combo_check(mvn.logpdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True]) 
Example #30
Source File: generative_adversarial_net.py    From autograd with MIT License
def adam_minimax(grad_both, init_params_max, init_params_min, callback=None, num_iters=100,
         step_size_max=0.001, step_size_min=0.001, b1=0.9, b2=0.999, eps=10**-8):
    """Adam modified to do minimiax optimization, for instance to help with
    training generative adversarial networks."""

    x_max, unflatten_max = flatten(init_params_max)
    x_min, unflatten_min = flatten(init_params_min)

    m_max = np.zeros(len(x_max))
    v_max = np.zeros(len(x_max))
    m_min = np.zeros(len(x_min))
    v_min = np.zeros(len(x_min))
    for i in range(num_iters):
        g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),
                                       unflatten_min(x_min), i)
        g_max, _ = flatten(g_max_uf)
        g_min, _ = flatten(g_min_uf)

        if callback: callback(unflatten_max(x_max), unflatten_min(x_min), i,
                              unflatten_max(g_max), unflatten_min(g_min))

        m_max = (1 - b1) * g_max      + b1 * m_max  # First  moment estimate.
        v_max = (1 - b2) * (g_max**2) + b2 * v_max  # Second moment estimate.
        mhat_max = m_max / (1 - b1**(i + 1))    # Bias correction.
        vhat_max = v_max / (1 - b2**(i + 1))
        x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps)

        m_min = (1 - b1) * g_min      + b1 * m_min  # First  moment estimate.
        v_min = (1 - b2) * (g_min**2) + b2 * v_min  # Second moment estimate.
        mhat_min = m_min / (1 - b1**(i + 1))    # Bias correction.
        vhat_min = v_min / (1 - b2**(i + 1))
        x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)
    return unflatten_max(x_max), unflatten_min(x_min)

