Python autograd.numpy.abs() Examples

The following are code examples showing how to use autograd.numpy.abs(). They are drawn from open source Python projects.
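Before the project examples, here is a minimal sketch of what differentiating through autograd.numpy.abs() looks like. It is not taken from any of the projects below; the function l1_norm is a made-up illustration.

import autograd.numpy as np  # thinly wrapped NumPy that records operations for differentiation
from autograd import grad

def l1_norm(x):
    # Sum of absolute values; differentiable everywhere except where an entry is exactly 0.
    return np.sum(np.abs(x))

g = grad(l1_norm)
print(g(np.array([1.5, -2.0, 3.0])))  # [ 1. -1.  1.], i.e. the sign of each entry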

Example 1
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def prior_error(mu_shift, w, n_u):

    a = -numpy.abs(w[:, numpy.arange(0, n_u)*3] + mu_shift[1])

    b = numpy.abs(w[:, numpy.arange(0, n_u)*3+1] + mu_shift[3])

    c = w[:, numpy.arange(0, n_u)*3+2] + mu_shift[5]

    q = numpy.linspace(1e-8, 1 - 1e-8, 128)

    # q = q.ravel()

    q_hat = numpy.mean(1 / (1 + numpy.exp(numpy.log(q)[None, None, :] * a[:, :, None] +
                                          numpy.log(1 - q)[None, None, :] * b[:, :, None] +
                                          c[:, :, None])), axis=0)

    return numpy.mean((q - q_hat) ** 2) 
Example 2
Project: qoc   Author: SchusterLab   File: controlarea.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step
        
        Returns:
        cost
        """
        if self.max_control_norms is not None:
            normalized_controls = controls / self.max_control_norms
        else:
            normalized_controls = controls

        # The cost is the discrete integral of each normalized control parameter
        # over the evolution time.
        cost = 0
        for i in range(self.control_count):
            cost = cost + anp.abs(anp.sum(normalized_controls[:, i]))
        cost_normalized = cost / self.control_size

        return cost_normalized * self.cost_multiplier 
Example 3
Project: qoc   Author: SchusterLab   File: mathmethods.py    MIT License
def magnus_m4(a, dt, time):
    """
    Construct a Magnus expansion of `a` of order four.
    
    References:
    [1] https://arxiv.org/abs/1709.06483

    Arguments:
    a :: (time :: float) -> ndarray (a_shape)
        - the matrix to expand
    dt :: float - the time step
    time :: float - the current time

    Returns:
    m4 :: ndarray (a_shape) - magnus expansion
    """
    t1 = time + dt * _M4_C1
    t2 = time + dt * _M4_C2
    a1 = a(t1)
    a2 = a(t2)
    m4 = ((dt / 2) * (a1 + a2)
          + _M4_F0 * (dt ** 2) * commutator(a2, a1))
    return m4 
Example 4
Project: differentiable-atomistic-potentials   Author: google   File: emt.py    Apache License 2.0
def stress(parameters, positions, numbers, cell, strain=np.zeros((3, 3))):
  """Compute the stress on an EMT system.

    Parameters
    ----------

    positions : array of floats. Shape = (natoms, 3)

    numbers : array of integers of atomic numbers (natoms,)

    cell: array of unit cell vectors. Shape = (3, 3)

    Returns
    -------
    stress : an array of stress components. Shape = (6,)
    [sxx, syy, szz, syz, sxz, sxy]

    """
  dEdst = elementwise_grad(energy, 4)

  volume = np.abs(np.linalg.det(cell))

  der = dEdst(parameters, positions, numbers, cell, strain)
  result = (der + der.T) / 2 / volume
  return np.take(result, [0, 4, 8, 5, 2, 1]) 
Example 5
Project: momi2   Author: popgenmethods   File: demo_plotter.py    GNU General Public License v3.0
def goto_time(self, t, add_time=True):
        # if exponentially growing, add extra time points whenever
        # the population size doubles
        if self.curr_g != 0 and t < float('inf'):
            halflife = np.abs(np.log(.5) / self.curr_g)
            add_t = self.curr_t + halflife
            while add_t < t:
                self._push_time(add_t)
                add_t += halflife

        while self.time_stack and self.time_stack[0] < t:
            self.step_time(hq.heappop(self.time_stack))
        self.step_time(t, add=False)
        if add_time:
            # put t on queue to be added when processing next event
            # (allows further events to change population size before plotting)
            self._push_time(t) 
Example 6
Project: ceviche   Author: fancompute   File: optimize_accelerator.py    MIT License
def accel_gradient(eps_arr, mode='max'):

    # set the permittivity of the FDFD and solve the fields
    F.eps_r = eps_arr.reshape((Nx, Ny))
    Ex, Ey, Hz = F.solve(source)

    # compute the gradient and normalize if you want
    G = npa.sum(Ey * eta / Ny)

    if mode == 'max':
        return -np.abs(G) / Emax(Ex, Ey, eps_r)
    elif mode == 'avg':
        return -np.abs(G) / Eavg(Ex, Ey)
    else:        
        return -np.abs(G / E0)

# define the gradient for autograd 
Example 7
Project: disropt   Author: DISROPT   File: max.py    GNU General Public License v3.0
def eval(self, x: np.ndarray) -> np.ndarray:
        return ((self.f1.eval(x) + self.f2.eval(x) + anp.abs(self.f1.eval(x) - self.f2.eval(x)))/2).reshape(self.output_shape)

    # @check_input
    # def jacobian(self, x: np.ndarray, **kwargs) -> np.ndarray:
    #     if not (self.f1.is_differentiable and self.f2.is_differentiable):
    #         warnings.warn("Composition of nondifferentiable functions. The Jacobian may be not correct.")
    #     # else:
    #     j1 = self.f1.jacobian(x, **kwargs)
    #     j2 = self.f2.jacobian(x, **kwargs)
    #     diff = self.f1.eval(x) - self.f2.eval(x)
    #     for elem in range(diff.size):
    #         if diff[elem] == 0:
    #             diff[elem] = random.uniform(-1, 1)
    #     res = j1 + j2 + np.diag(np.sign(diff).flatten()) @ (j1 - j2)
    #     return res/2 
Example 8
Project: kernel-gof   Author: wittawatj   File: util.py    MIT License
def one_of_K_code(arr):
    """
    Make a one-of-K coding out of the numpy array.
    For example, if arr = ([0, 1, 0, 2]), then return a 2d array of the form 
     [[1, 0, 0], 
      [0, 1, 0],
      [1, 0, 0],
      [0, 0, 1]]
    """
    U = np.unique(arr)
    n = len(arr)
    nu = len(U)
    X = np.zeros((n, nu))
    for i, u in enumerate(U):
        Ii = np.where( np.abs(arr - u) < 1e-8 )
        #ni = len(Ii)
        X[Ii[0], i] = 1
    return X 
Example 9
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a one-dimensional length-k array of variances
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != len(variances):
            raise ValueError('Number of components in means and variances do not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 10
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a k x d x d numpy array containing k covariance matrices,
            one for each component.
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != variances.shape[0]:
            raise ValueError('Number of components in means and variances do not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 11
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def _blocked_gibbs_next(self, X, H):
        """
        Sample from the mutual conditional distributions.
        """
        dh = H.shape[1]
        n, dx = X.shape
        B = self.B
        b = self.b

        # Draw H.
        XB2C = np.dot(X, self.B) + 2.0*self.c
        # Ph: n x dh matrix
        Ph = DSGaussBernRBM.sigmoid(XB2C)
        # H: n x dh
        H = (np.random.rand(n, dh) <= Ph)*2 - 1.0
        assert np.all(np.abs(H) - 1 <= 1e-6 )
        # Draw X.
        # mean: n x dx
        mean = old_div(np.dot(H, B.T),2.0) + b
        X = np.random.randn(n, dx) + mean
        return X, H 
Example 12
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a one-dimensional length-k array of variances
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != len(variances):
            raise ValueError('Number of components in means and variances do not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 13
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a k x d x d numpy array containing a stack of k covariance
            matrices, one for each mixture component.
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != variances.shape[0]:
            raise ValueError('Number of components in means and variances do not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 14
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def fit(self, X, y):
        # define the loss function
        def calc_loss(W):
            y_pred = logistic_predictions(W, XMat)
            label_probabilities = y_pred * yMat + (1 - y_pred) * (1 - yMat)
            if self.reg is None:
                return -np.sum(np.log(label_probabilities))
            elif self.reg == 'l1':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*(np.abs(W[0:-1])))
            elif self.reg == 'l2':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*W[0:-1]*W[0:-1])
            else:
                print("the reg can only be None, l1 or l2!")
                return ValueError

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim) * 0.1
        for it in range(max_iters + 1):
            loss = calc_loss(self.W)
            grad_params = grad_fun(self.W)
            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 15
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # define the loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                   + np.sum(self.alpha * np.abs(W[0:-1]))

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features + 1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 16
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_abs():     unary_ufunc_check(np.abs, lims=[0.1, 4.0]) 
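The lims=[0.1, 4.0] above presumably keep the test inputs away from zero, where abs() is not differentiable. A comparable check can be written directly with autograd.test_util.check_grads (the same helper used in Example 31); the following is a minimal sketch with arbitrarily chosen test points, not code from the autograd-forward project.

import autograd.numpy as np
from autograd.test_util import check_grads

x = np.array([0.1, 1.3, 4.0])  # strictly positive, away from the kink at x = 0
check_grads(np.abs, modes=['rev'], order=1)(x)  # fails loudly if the reverse-mode gradient is wrong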
Example 17
Project: qoc   Author: SchusterLab   File: targetdensityinfidelity.py    MIT License
def cost(self, controls, densities, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        densities
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved state and its target state.
        # NOTE: Autograd doesn't support vjps of anp.trace with axis arguments.
        # Nor does it support the vjp of anp.einsum(...ii->..., a).
        # Therefore, we must use a for loop to index the traces.
        # The following computations are equivalent to:
        # inner_products = (anp.trace(anp.matmul(self.target_densities_dagger, densities),
        #                             axis1=-1, axis2=-2) / self.hilbert_size)
        prods = anp.matmul(self.target_densities_dagger, densities)
        fidelity_sum = 0
        for i, prod in enumerate(prods):
            inner_prod = anp.trace(prod)
            fidelity = anp.abs(inner_prod)
            fidelity_sum = fidelity_sum + fidelity
        fidelity_normalized = fidelity_sum / (self.density_count * self.hilbert_size)
        infidelity = 1 - fidelity_normalized

        return infidelity * self.cost_multiplier 
Example 18
Project: qoc   Author: SchusterLab   File: targetdensityinfidelitytime.py    MIT License
def cost(self, controls, densities, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        densities
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved density and its target density.
        # NOTE: Autograd doesn't support vjps of anp.trace with axis arguments.
        # Nor does it support the vjp of anp.einsum(...ii->..., a).
        # Therefore, we must use a for loop to index the traces.
        # The following computations are equivalent to:
        # inner_products = (anp.trace(anp.matmul(self.target_densities_dagger, densities),
        #                             axis1=-1, axis2=-2) / self.hilbert_size)
        prods = anp.matmul(self.target_densities_dagger, densities)
        fidelity_sum = 0
        for i, prod in enumerate(prods):
            inner_prod = anp.trace(prod)
            fidelity = anp.abs(inner_prod)
            fidelity_sum = fidelity_sum + fidelity
        fidelity_normalized = fidelity_sum / (self.density_count * self.hilbert_size)
        infidelity = 1 - fidelity_normalized
        cost_normalized = infidelity / self.cost_eval_count

        return cost_normalized * self.cost_multiplier 
Example 19
Project: qoc   Author: SchusterLab   File: controlbandwidthmax.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        cost = 0
        # Iterate over the controls, penalize each control that has
        # frequencies greater than its maximum frequency.
        for i, max_bandwidth in enumerate(self.max_bandwidths):
            control_fft = anp.fft.fft(controls[:, i])
            control_fft_sq  = anp.abs(control_fft)
            penalty_freq_indices = anp.nonzero(self.freqs >= max_bandwidth)[0]
            penalized_ffts = control_fft_sq[penalty_freq_indices]
            penalty = anp.sum(penalized_ffts)
            penalty_normalized = penalty / (penalty_freq_indices.shape[0] * anp.max(penalized_ffts))
            cost = cost + penalty_normalized
        cost_normalized =  cost / self.control_count
                       
        return cost_normalized * self.cost_multiplier 
Example 20
Project: qoc   Author: SchusterLab   File: expm.py    MIT License
def one_norm(a):
    """
    Return the one-norm of the matrix.

    References:
    [0] https://www.mathworks.com/help/dsp/ref/matrix1norm.html

    Arguments:
    a :: ndarray(N x N) - The matrix to compute the one norm of.
    
    Returns:
    one_norm_a :: float - The one norm of a.
    """
    return anp.max(anp.sum(anp.abs(a), axis=0)) 
Example 21
Project: qoc   Author: SchusterLab   File: mathmethods.py    MIT License
def magnus_m6(a, dt, time):
    """
    Construct a Magnus expansion of `a` of order six.
    
    References:
    [1] https://arxiv.org/abs/1709.06483

    Arguments:
    a :: (time :: float) -> ndarray (a_shape)
        - the matrix to expand
    dt :: float - the time step
    time :: float - the current time

    Returns:
    m6 :: ndarray (a_shape) - magnus expansion
    """
    t1 = time + dt * _M6_C1
    t2 = time + dt * _M6_C2
    t3 = time + dt * _M6_C3
    a1 = a(t1)
    a2 = a(t2)
    a3 = a(t3)
    b1 = dt * a2
    b2 = _M6_F0 * dt * (a3 - a1)
    b3 = _M6_F1 * dt * (a3 - 2 * a2 + a1)
    b1_b2_commutator = commutator(b1, b2)
    m6 = (b1 + _M6_F2 * b3 + _M6_F3
            * commutator(-20 * b1 - b3 + b1_b2_commutator,
                         b2 - _M6_F4
                         * commutator(b1, 2 * b3 + b1_b2_commutator)))
    return m6


### LINDBLAD METHODS ### 
Example 22
Project: differentiable-atomistic-potentials   Author: google   File: lennardjones.py    Apache License 2.0
def stress(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the stress on a Lennard-Jones system.

    Parameters
    ----------

    params : dictionary of parameters.
      Defaults to {'sigma': 1.0, 'epsilon': 1.0}

    positions : array of floats. Shape = (natoms, 3)

    cell: array of unit cell vectors. Shape = (3, 3)

    Returns
    -------
    stress : an array of stress components. Shape = (6,)
    [sxx, syy, szz, syz, sxz, sxy]

    """
  dEdst = elementwise_grad(energy, 3)

  volume = np.abs(np.linalg.det(cell))

  der = dEdst(params, positions, cell, strain)
  result = (der + der.T) / 2 / volume
  return np.take(result, [0, 4, 8, 5, 2, 1])


# Oneway LennardJones potential 
Example 23
Project: differentiable-atomistic-potentials   Author: google   File: lennardjones.py    Apache License 2.0
def stress_oneway(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the stress on a Lennard-Jones system.

    Parameters
    ----------

    params : dictionary of parameters.
      Defaults to {'sigma': 1.0, 'epsilon': 1.0}

    positions : array of floats. Shape = (natoms, 3)

    cell: array of unit cell vectors. Shape = (3, 3)

    Returns
    -------
    stress : an array of stress components. Shape = (6,)
    [sxx, syy, szz, syz, sxz, sxy]

    """
  dEdst = elementwise_grad(energy_oneway, 3)

  volume = np.abs(np.linalg.det(cell))

  der = dEdst(params, positions, cell, strain)
  result = (der + der.T) / 2 / volume
  return np.take(result, [0, 4, 8, 5, 2, 1]) 
Example 24
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def transformed_expi(x):
    abs_x = np.abs(x)
    ser = abs_x < 1. / 45.
    nser = np.logical_not(ser)

#     ret = np.zeros(x.shape)
#     ret[ser], ret[nser] = transformed_expi_series(x[ser]), transformed_expi_naive(x[nser])))
#     return ret

    # We use np.concatenate to combine.
    # would be better to use ret[ser] and ret[nser] as commented out above
    # but array assignment not yet supported by autograd
    assert np.all(abs_x[:-1] >= abs_x[1:])
    return np.concatenate((transformed_expi_naive(x[nser]), transformed_expi_series(x[ser]))) 
Example 25
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def expm1d(x, eps=1e-6):
    x = np.array(x)
    abs_x = np.abs(x)
    if x.shape:
        # FIXME: don't require abs_x to be increasing
        assert np.all(abs_x[1:] >= abs_x[:-1])
        small = abs_x < eps
        big = ~small
        return np.concatenate([expm1d_taylor(x[small]),
                               expm1d_naive(x[big])])
    elif abs_x < eps:
        return expm1d_taylor(x)
    else:
        return expm1d_naive(x) 
Example 26
Project: momi2   Author: popgenmethods   File: test_normalizing_constant.py    GNU General Public License v3.0
def check_num_snps(sampled_n_dict, demo, num_loci, mut_rate, ascertainment_pop=None, error_matrices=None):
    if error_matrices is not None or ascertainment_pop is not None:
        # TODO
        raise NotImplementedError

    #seg_sites = momi.simulate_ms(
    #    ms_path, demo, num_loci=num_loci, mut_rate=mut_rate)
    #sfs = seg_sites.sfs

    num_bases = 1000
    sfs = demo.simulate_data(
        sampled_n_dict=sampled_n_dict,
        muts_per_gen=mut_rate/num_bases,
        recoms_per_gen=0,
        length=num_bases,
        num_replicates=num_loci)._sfs

    n_sites = sfs.n_snps(vector=True)

    n_sites_mean = np.mean(n_sites)
    n_sites_sd = np.std(n_sites)

    # TODO this test isn't very useful because expected_branchlen is not used anywhere internally anymore
    n_sites_theoretical = demo.expected_branchlen(sampled_n_dict) * mut_rate
    #n_sites_theoretical = momi.expected_total_branch_len(
    #    demo, ascertainment_pop=ascertainment_pop, error_matrices=error_matrices) * mut_rate

    zscore = -np.abs(n_sites_mean - n_sites_theoretical) / n_sites_sd
    pval = scipy.stats.norm.cdf(zscore) * 2.0

    assert pval >= .05 
Example 27
Project: momi2   Author: popgenmethods   File: test_msprime.py    GNU General Public License v3.0
def my_t_test(labels, theoretical, observed, min_samples=25):

    assert theoretical.ndim == 1 and observed.ndim == 2
    assert len(theoretical) == observed.shape[
        0] and len(theoretical) == len(labels)

    n_observed = np.sum(observed > 0, axis=1)
    theoretical, observed = theoretical[
        n_observed > min_samples], observed[n_observed > min_samples, :]
    labels = np.array(list(map(str, labels)))[n_observed > min_samples]
    n_observed = n_observed[n_observed > min_samples]

    runs = observed.shape[1]
    observed_mean = np.mean(observed, axis=1)
    bias = observed_mean - theoretical
    variances = np.var(observed, axis=1)

    t_vals = bias / np.sqrt(variances) * np.sqrt(runs)

    # get the p-values
    abs_t_vals = np.abs(t_vals)
    p_vals = 2.0 * scipy.stats.t.sf(abs_t_vals, df=runs - 1)
    print("# labels, p-values, empirical-mean, theoretical-mean, nonzero-counts")
    toPrint = np.array([labels, p_vals, observed_mean,
                        theoretical, n_observed]).transpose()
    toPrint = toPrint[np.array(toPrint[:, 1], dtype='float').argsort()[
        ::-1]]  # reverse-sort by p-vals
    print(toPrint)

    print("Note p-values are for t-distribution, which may not be a good approximation to the true distribution")

    # p-values should be uniformly distributed
    # so then the min p-value should be beta distributed
    return scipy.stats.beta.cdf(np.min(p_vals), 1, len(p_vals)) 
Example 28
Project: SyntheticStatistics   Author: BlissChapman   File: data.py    MIT License
def __init__(self, pbs, qbs, pmix=None, qmix=None):
        """
        pbs: a 2d numpy array (cx x 2) of lower/upper bounds.
            pbs[i, 0] and pbs[i, 1] specifies the lower and upper bound
            for the ith mixture component of P.
        qbs: a 2d numpy array (cy x 2) of lower/upper bounds.
        pmix: The mixture weight vector for P. A numpy array of length cx.
            Automatically set to uniform weights if unspecified.
        qmix: The mixture weight vector for Q. A numpy array of length cy.
        """
        cx = pbs.shape[0]
        cy = qbs.shape[0]
        if pbs.shape[1] != 2:
            raise ValueError('pbs must have 2 columns')
        if qbs.shape[1] != 2:
            raise ValueError('qbs must have 2 columns')
        if pmix is None:
            pmix = old_div(np.ones(cx),float(cx))
        if qmix is None:
            qmix = old_div(np.ones(cy),float(cy))

        if len(pmix) != cx:
            raise ValueError('Length of pmix does not match #rows of pbs')
        if len(qmix) != cy:
            raise ValueError('Length of qmix does not match #rows of qbs')

        if np.abs(np.sum(pmix) - 1) > 1e-6:
            raise ValueError('mixture weights pmix has to sum to 1')
        if np.abs(np.sum(qmix) - 1) > 1e-6:
            raise ValueError('mixture weights qmix has to sum to 1')

        if not np.all(pbs[:, 1] - pbs[:, 0] > 0):
            raise ValueError('Require upper - lower to be positive. False for p')

        if not np.all(qbs[:, 1] - qbs[:, 0] > 0):
            raise ValueError('Require upper - lower to be positive. False for q')
        self.pbs = pbs
        self.qbs = qbs
        self.pmix = pmix
        self.qmix = qmix 
Example 29
Project: networked-popularity   Author: avalanchesiqi   File: predictors.py    MIT License
def arnet_cost_function(params, model_input, model_output):
    yhat = arnet_predict(params, model_input)[0]
    # minimize SMAPE
    return np.mean(200 * np.nan_to_num(np.abs(model_output - yhat) / (np.abs(model_output) + np.abs(yhat)))) 
Example 30
Project: networked-popularity   Author: avalanchesiqi   File: predictors.py    MIT License
def smape_loss(y_true, y_pred):
    return K.mean(200 * K.abs(y_pred - y_true) / (K.abs(y_pred) + K.abs(y_true))) 
Example 31
Project: scikit-lego   Author: koaning   File: linear_model.py    MIT License
def fit(self, X, y):
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        if self.effect not in self.allowed_effects:
            raise ValueError(f"effect {self.effect} must be in {self.allowed_effects}")

        def deadzone(errors):
            if self.effect == "linear":
                return np.where(errors > self.threshold, errors, np.zeros(errors.shape))
            if self.effect == "quadratic":
                return np.where(
                    errors > self.threshold, errors ** 2, np.zeros(errors.shape)
                )

        def training_loss(weights):
            diff = np.abs(np.dot(X, weights) - y)
            if self.relative:
                diff = diff / y
            return np.mean(deadzone(diff))

        n, k = X.shape

        # Build a function that returns gradients of training loss using autograd.
        training_gradient_fun = grad(training_loss)

        # Check the gradients numerically, just to be safe.
        weights = np.random.normal(0, 1, k)
        if self.check_grad:
            check_grads(training_loss, modes=["rev"])(weights)

        # Optimize weights using gradient descent.
        self.loss_log_ = np.zeros(self.n_iter)
        self.wts_log_ = np.zeros((self.n_iter, k))
        self.deriv_log_ = np.zeros((self.n_iter, k))
        for i in range(self.n_iter):
            weights -= training_gradient_fun(weights) * self.stepsize
            self.wts_log_[i, :] = weights.ravel()
            self.loss_log_[i] = training_loss(weights)
            self.deriv_log_[i, :] = training_gradient_fun(weights).ravel()
        self.coefs_ = weights
        return self 
Example 32
Project: scikit-lego   Author: koaning   File: linear_model.py    MIT License
def constraints(self, y_hat, y_true, sensitive, n_obs):
        if self.covariance_threshold is not None:
            dec_boundary_cov = y_hat @ (sensitive - np.mean(sensitive, axis=0)) / n_obs
            return [cp.abs(dec_boundary_cov) <= self.covariance_threshold]
        else:
            return [] 
Example 33
Project: scikit-lego   Author: koaning   File: linear_model.py    MIT License
def constraints(self, y_hat, y_true, sensitive, n_obs):
        if self.covariance_threshold is not None:
            n_obs = len(y_true[y_true == self.positive_target])
            dec_boundary_cov = (
                y_hat[y_true == self.positive_target]
                @ (
                    sensitive[y_true == self.positive_target]
                    - np.mean(sensitive, axis=0)
                )
                / n_obs
            )
            return [cp.abs(dec_boundary_cov) <= self.covariance_threshold]
        else:
            return [] 
Example 34
Project: ceviche   Author: fancompute   File: optimize_mode_converter.py    MIT License
def viz_sim(epsr):
    """Solve and visualize a simulation with permittivity 'epsr'
    """
    simulation = fdfd_ez(omega, dl, epsr, [Npml, Npml])
    Hx, Hy, Ez = simulation.solve(source)
    fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(6,3))
    ceviche.viz.real(Ez, outline=epsr, ax=ax[0], cbar=False)
    ax[0].plot(input_slice.x*np.ones(len(input_slice.y)), input_slice.y, 'g-')
    ax[0].plot(output_slice.x*np.ones(len(output_slice.y)), output_slice.y, 'r-')
    ceviche.viz.abs(epsr, ax=ax[1], cmap='Greys');
    plt.show()
    return (simulation, ax) 
Example 35
Project: ceviche   Author: fancompute   File: optimize_mode_converter.py    MIT License
def measure_modes(Ez):
    return npa.abs(npa.sum(npa.conj(Ez)*probe)) 
Example 36
Project: ceviche   Author: fancompute   File: optimize_1_3.py    MIT License
def viz_sim(epsr):
    """Solve and visualize a simulation with permittivity 'epsr'
    """
    simulation = fdfd_ez(omega, dl, epsr, [Npml, Npml])
    Hx, Hy, Ez = simulation.solve(source)
    fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(6,3))
    ceviche.viz.real(Ez, outline=epsr, ax=ax[0], cbar=False)
    ax[0].plot(input_slice.x*np.ones(len(input_slice.y)), input_slice.y, 'g-')
    for output_slice in output_slices:
        ax[0].plot(output_slice.x*np.ones(len(output_slice.y)), output_slice.y, 'r-')
    ceviche.viz.abs(epsr, ax=ax[1], cmap='Greys');
    plt.show()
    return (simulation, ax) 
Example 37
Project: ceviche   Author: fancompute   File: optimize_1_3.py    MIT License
def measure_modes(Ez, probe_ind=0):
    return npa.abs(npa.sum(npa.conj(Ez)*probes[probe_ind])) 
Example 38
Project: ceviche   Author: fancompute   File: optimize_accelerator.py    MIT License
def Emax(Ex, Ey, eps_r):
    E_mag = npa.sqrt(npa.square(npa.abs(Ex)) + npa.square(npa.abs(Ey)))
    material_density = (eps_r - 1) / (eps_max - 1)
    return npa.max(E_mag * material_density)

# average electric field magnitude in the domain 
Example 39
Project: ceviche   Author: fancompute   File: optimize_box.py    MIT License
def intensity(eps_arr):

    eps_r = eps_arr.reshape((Nx, Ny))
    # set the permittivity of the FDFD and solve the fields
    F.eps_r = eps_r
    Ex, Ey, Hz = F.solve(source)

    # compute the gradient and normalize if you want
    I = npa.sum(npa.square(npa.abs(Hz * probe)))
    return -I / I_H0

# define the gradient for autograd 
Example 40
Project: ceviche   Author: fancompute   File: primitives.py    MIT License
def out_fn(output_vector):
        # this function takes the output of each primitive and returns a real scalar (sort of like the objective function)
        return npa.abs(npa.sum(output_vector)) 
Example 41
Project: ceviche   Author: fancompute   File: test_gradients_fdfd_complex.py    MIT License
def test_Hz_reverse(self):

        print('\ttesting reverse-mode Hz in FDFD')

        f = fdfd_hz(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(eps_arr):

            eps_r = eps_arr.reshape((self.Nx, self.Ny))

            # set the permittivity
            f.eps_r = eps_r

            # set the source amplitude to the permittivity at that point
            Ex, Ey, Hz = f.solve(eps_r * self.source_hz, iterative=True)

            return npa.sum(npa.square(npa.abs(Hz))) \
                 + npa.sum(npa.square(npa.abs(Ex))) \
                 + npa.sum(npa.square(npa.abs(Ey)))

        grad_autograd_rev = jacobian(J_fdfd, mode='reverse')(self.eps_arr)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(self.eps_arr)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(self.eps_arr))
            print('\tgrad (auto):  \n\t\t', grad_autograd_rev)
            print('\tgrad (num):   \n\t\t\n', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_rev) 
Example 42
Project: ceviche   Author: fancompute   File: test_gradients_fdfd_complex.py    MIT License
def test_Hz_forward(self):

        print('\ttesting forward-mode Hz in FDFD')

        f = fdfd_hz(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(c):

            # set the permittivity
            f.eps_r = c * self.eps_r

            # set the source amplitude to the permittivity at that point
            Ex, Ey, Hz = f.solve(c * self.eps_r * self.source_hz)

            return npa.square(npa.abs(Hz)) \
                 + npa.square(npa.abs(Ex)) \
                 + npa.square(npa.abs(Ey))

        grad_autograd_for = jacobian(J_fdfd, mode='forward')(1.0)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(1.0)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(1.0))
            print('\tgrad (auto):  \n\t\t', grad_autograd_for)
            print('\tgrad (num):   \n\t\t', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_for) 
Example 43
Project: ceviche   Author: fancompute   File: test_gradients_fdfd_complex.py    MIT License
def test_Ez_forward(self):

        print('\ttesting forward-mode Ez in FDFD')

        f = fdfd_ez(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(c):

            # set the permittivity
            f.eps_r = c * self.eps_r

            # set the source amplitude to the permittivity at that point
            Hx, Hy, Ez = f.solve(c * self.eps_r * self.source_ez)

            return npa.square(npa.abs(Ez)) \
                 + npa.square(npa.abs(Hx)) \
                 + npa.square(npa.abs(Hy))

        grad_autograd_for = jacobian(J_fdfd, mode='forward')(1.0)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(1.0)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(1.0))
            print('\tgrad (auto):  \n\t\t', grad_autograd_for)
            print('\tgrad (num):   \n\t\t', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_for) 
Example 44
Project: ceviche   Author: fancompute   File: test_gradients_fdfd.py    MIT License
def test_Hz_reverse(self):

        print('\ttesting reverse-mode Hz in FDFD')

        f = fdfd_hz(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(eps_arr):

            eps_r = eps_arr.reshape((self.Nx, self.Ny))

            # set the permittivity
            f.eps_r = eps_r

            # set the source amplitude to the permittivity at that point
            Ex, Ey, Hz = f.solve(eps_r * self.source_hz)

            return npa.sum(npa.square(npa.abs(Hz))) \
                 + npa.sum(npa.square(npa.abs(Ex))) \
                 + npa.sum(npa.square(npa.abs(Ey)))


        grad_autograd_rev = jacobian(J_fdfd, mode='reverse')(self.eps_arr)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(self.eps_arr)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(self.eps_arr))
            print('\tgrad (auto):  \n\t\t', grad_autograd_rev)
            print('\tgrad (num):   \n\t\t\n', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_rev) 
Example 45
Project: ceviche   Author: fancompute   File: test_gradients_fdfd.py    MIT License
def test_Hz_forward(self):

        print('\ttesting forward-mode Hz in FDFD')

        f = fdfd_hz(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(c):

            # set the permittivity
            f.eps_r = c * self.eps_r

            # set the source amplitude to the permittivity at that point
            Ex, Ey, Hz = f.solve(c * self.eps_r * self.source_hz)

            return npa.square(npa.abs(Hz)) \
                 + npa.square(npa.abs(Ex)) \
                 + npa.square(npa.abs(Ey))

        grad_autograd_for = jacobian(J_fdfd, mode='forward')(1.0)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(1.0)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(1.0))
            print('\tgrad (auto):  \n\t\t', grad_autograd_for)
            print('\tgrad (num):   \n\t\t', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_for) 
Example 46
Project: ceviche   Author: fancompute   File: test_gradients_fdfd.py    MIT License
def test_Ez_reverse(self):

        print('\ttesting reverse-mode Ez in FDFD')

        f = fdfd_ez(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(eps_arr):

            eps_r = eps_arr.reshape((self.Nx, self.Ny))

            # set the permittivity
            f.eps_r = eps_r

            # set the source amplitude to the permittivity at that point
            Hx, Hy, Ez = f.solve(eps_r * self.source_ez)

            return npa.sum(npa.square(npa.abs(Ez))) \
                 + npa.sum(npa.square(npa.abs(Hx))) \
                 + npa.sum(npa.square(npa.abs(Hy)))

        grad_autograd_rev = jacobian(J_fdfd, mode='reverse')(self.eps_arr)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(self.eps_arr)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(self.eps_arr))
            print('\tgrad (auto):  \n\t\t', grad_autograd_rev)
            print('\tgrad (num):   \n\t\t', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_rev) 
Example 47
Project: ceviche   Author: fancompute   File: test_gradients_fdfd.py    MIT License
def test_Ez_forward(self):

        print('\ttesting forward-mode Ez in FDFD')

        f = fdfd_ez(self.omega, self.dL, self.eps_r, self.pml)

        def J_fdfd(c):

            # set the permittivity
            f.eps_r = c * self.eps_r

            # set the source amplitude to the permittivity at that point
            Hx, Hy, Ez = f.solve(c * self.eps_r * self.source_ez)

            return npa.square(npa.abs(Ez)) \
                 + npa.square(npa.abs(Hx)) \
                 + npa.square(npa.abs(Hy))

        grad_autograd_for = jacobian(J_fdfd, mode='forward')(1.0)
        grad_numerical = jacobian(J_fdfd, mode='numerical')(1.0)

        if VERBOSE:
            print('\tobjective function value: ', J_fdfd(1.0))
            print('\tgrad (auto):  \n\t\t', grad_autograd_for)
            print('\tgrad (num):   \n\t\t', grad_numerical)

        self.check_gradient_error(grad_numerical, grad_autograd_for) 
Example 48
Project: ceviche   Author: fancompute   File: test_param_opt.py    MIT License
def intensity(eps_arr):

    eps_r = eps_arr.reshape((Nx, Ny))
    # set the permittivity of the FDFD and solve the fields
    F.eps_r = eps_r
    Ex, Ey, Hz = F.solve(source)

    # compute the gradient and normalize if you want
    I = np.sum(np.square(np.abs(Hz * probe)))
    I_E0, I_H0 = get_normalization(F, source, probe)

    return -I / I_H0 
Example 49
Project: disropt   Author: DISROPT   File: abs.py    GNU General Public License v3.0
def _to_cvxpy(self):
        import cvxpy as cvx
        return cvx.abs(self.fn._to_cvxpy()) 
Example 50
Project: disropt   Author: DISROPT   File: abs.py    GNU General Public License v3.0
def eval(self, x: np.ndarray) -> np.ndarray:
        return anp.abs(self.fn.eval(x)).reshape(self.output_shape)