Python autograd.numpy.max() Examples

The following code examples show how to use autograd.numpy.max(). They are drawn from open source Python projects and ordered by vote count.
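
To make the function's behavior concrete before the examples, here is a minimal sketch (not taken from any of the projects below) showing that autograd differentiates through np.max; the max-shift it performs is the same log-sum-exp stabilization several of the examples use:

import autograd.numpy as np
from autograd import grad

def logsumexp(x):
    # Shift by the maximum so np.exp never overflows; np.max is
    # differentiable under autograd, and the shifts cancel analytically.
    m = np.max(x)
    return np.log(np.sum(np.exp(x - m))) + m

g = grad(logsumexp)
print(g(np.array([1.0, 2.0, 3.0])))  # the softmax of x: [0.090 0.245 0.665]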

Example 1
Project: STSC   Author: wOOL   File: stsc_autograd.py    The Unlicense 6 votes
def get_rotation_matrix(X, C):
    ij_list = [(i, j) for i in range(C) for j in range(C) if i < j]

    def cost(theta_list):
        U_list = generate_U_list(ij_list, theta_list, C)
        R = reduce(npy.dot, U_list, npy.eye(C))
        Z = X.dot(R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    theta_list_init = npy.array([0.0] * int(C * (C - 1) / 2))
    opt = minimize(cost,
                   x0=theta_list_init,
                   method='CG',
                   jac=grad(cost),
                   options={'disp': False})
    return opt.fun, reduce(npy.dot, generate_U_list(ij_list, opt.x, C), npy.eye(C)) 
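
Here npy.max(Z, axis=1, keepdims=True) takes each row's largest entry; keepdims=True keeps M as a column vector so that Z / M broadcasts row-wise, and jac=grad(cost) hands the CG solver an exact gradient through that max.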
Example 2
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0 6 votes
def RBF_p_diag(sigma, length_scale, std):

    if (numpy.max(sigma) - numpy.min(sigma)) == 0:
        sigma = sigma * 0

    sigma2 = sigma ** 2

    S = (sigma2 + sigma2 + (length_scale ** 2))

    K = length_scale * (S ** -0.5) * (std ** 2)

    length_scale_g = (std ** 2 * (sigma2 + sigma2)) / (length_scale ** 2 + sigma2 + sigma2) ** (3 / 2)

    std_g = 2 * K / std

    sigma_g = -(length_scale*sigma*std**2)/(length_scale**2 + sigma**2 + sigma**2)**(3/2)

    mu_g = numpy.zeros_like(sigma_g)

    if (numpy.max(sigma) - numpy.min(sigma)) == 0:
        sigma_g = sigma_g * 0

    return K, length_scale_g, std_g, mu_g, sigma_g 
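
This is the diagonal counterpart of the full RBF_p kernel shown later in this list: on the diagonal the two inputs coincide, so the same sigma2 enters S twice, the exponential factor collapses to exp(0) = 1, and the mu gradient vanishes identically, which is why mu_g is all zeros.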
Example 3
Project: vittles   Author: rgiordan   File: sensitivity_lib.py    Apache License 2.0 6 votes
def __init__(self, fun, order1, order2):
        self._order1 = order1
        self._order2 = order2

        self._eval_fun_derivs = [[ fun ]]
        for x1_ind in range(self._order1 + 1):
            if x1_ind > 0: # The first row should be 0-th order x1 partials.
                # Append one x1 derivative.
                next_deriv = _append_jvp(
                    self._eval_fun_derivs[x1_ind - 1][0],
                    num_base_args=2, argnum=0)
                self._eval_fun_derivs.append([ next_deriv ])
            for x2_ind in range(self._order2):
                # Append one x2 derivative.  Note that the array already contains
                # a 0-th order x2 partial, and we are appending order2 new
                # derivatives, for a max order of order2 in order2 + 1 columns.
                next_deriv = _append_jvp(
                    self._eval_fun_derivs[x1_ind][x2_ind],
                    num_base_args=2, argnum=1)
                self._eval_fun_derivs[x1_ind].append(next_deriv) 
Example 4
Project: vittles   Author: rgiordan   File: sensitivity_lib.py    Apache License 2.0 6 votes
def __init__(self, fun, order1, order2):
        self._order1 = order1
        self._order2 = order2

        # Build an array of higher order reverse mode partial derivatives.
        # Traverse the rows (x1 partials) first, then traverse the
        # columns (x2 partials).
        self._eval_deriv_arrays = [[ fun ]]
        for x1_ind in range(self._order1 + 1):
            if x1_ind > 0:
                # Append one x1 derivative.
                next_deriv = autograd.jacobian(
                    self._eval_deriv_arrays[x1_ind - 1][0], argnum=0)
                self._eval_deriv_arrays.append([ next_deriv ])
            for x2_ind in range(self._order2):
                # Append one x2 derivative.  Note that the array already has
                # a 0-th order x2 partial, and we are appending order2 new
                # derivatives, for a max order of order2 in order2 + 1 columns.
                next_deriv = autograd.jacobian(
                    self._eval_deriv_arrays[x1_ind][x2_ind], argnum=1)
                self._eval_deriv_arrays[x1_ind].append(next_deriv) 
Example 5
Project: ceviche   Author: fancompute   File: optimize_accelerator.py    MIT License 6 votes
def accel_gradient(eps_arr, mode='max'):

    # set the permittivity of the FDFD and solve the fields
    F.eps_r = eps_arr.reshape((Nx, Ny))
    Ex, Ey, Hz = F.solve(source)

    # compute the gradient and normalize if you want
    G = npa.sum(Ey * eta / Ny)

    if mode == 'max':
        return -np.abs(G) / Emax(Ex, Ey, eps_r)
    elif mode == 'avg':
        return -np.abs(G) / Eavg(Ex, Ey)
    else:        
        return -np.abs(G / E0)

# define the gradient for autograd 
Example 6
Project: kernel-gof   Author: wittawatj   File: util.py    MIT License 6 votes
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project 
    Z so that all points in Z live in the bounds.

    Z: m x d 
    Data: n x d

    Return a projected Z of size m x d.
    """
    n, d = Z.shape
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], n, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], n, axis=0)

    Z = np.maximum(LowMat, Z)
    Z = np.minimum(UpMat, Z)
    return Z 
Example 7
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License 5 votes
def hinge(s):
    # np.maximum is the elementwise binary maximum; the original
    # np.max(0, 1 - s) passed 1 - s as the axis argument and raised a TypeError.
    return np.maximum(0, 1 - s) 
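
The distinction matters throughout these examples: np.max reduces an array to its largest entry (its second positional argument is the axis), while np.maximum compares two arrays elementwise. A quick illustration with a hypothetical score array:

import autograd.numpy as np

s = np.array([-0.5, 0.5, 2.0])
print(np.maximum(0, 1 - s))  # [1.5 0.5 0. ]  -- elementwise hinge
print(np.max(1 - s))         # 1.5            -- a single reduced value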
Example 8
Project: STSC   Author: wOOL   File: stsc_manopt.py    The Unlicense 5 votes
def get_rotation_matrix(X, C):
    def cost(R):
        Z = npy.dot(X, R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    manifold = Stiefel(C, C)
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = SteepestDescent(logverbosity=0)
    opt = solver.solve(problem=problem, x=npy.eye(C))
    return cost(opt), opt 
Example 9
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License 5 votes
def test_max():  stat_check(np.max) 
Example 10
Project: qoc   Author: SchusterLab   File: controlbandwidthmax.py    MIT License 5 votes
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        cost = 0
        # Iterate over the controls, penalize each control that has
        # frequencies greater than its maximum frequency.
        for i, max_bandwidth in enumerate(self.max_bandwidths):
            control_fft = anp.fft.fft(controls[:, i])
            control_fft_sq  = anp.abs(control_fft)
            penalty_freq_indices = anp.nonzero(self.freqs >= max_bandwidth)[0]
            penalized_ffts = control_fft_sq[penalty_freq_indices]
            penalty = anp.sum(penalized_ffts)
            penalty_normalized = penalty / (penalty_freq_indices.shape[0] * anp.max(penalized_ffts))
            cost = cost + penalty_normalized
        cost_normalized = cost / self.control_count

        return cost_normalized * self.cost_multiplier 
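
Dividing each control's penalty by penalty_freq_indices.shape[0] * anp.max(penalized_ffts) bounds the term by 1, since a sum of magnitudes is at most their count times their maximum; averaging over controls then keeps cost_normalized in [0, 1].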
Example 11
Project: qoc   Author: SchusterLab   File: expm.py    MIT License 5 votes
def one_norm(a):
    """
    Return the one-norm of the matrix.

    References:
    [0] https://www.mathworks.com/help/dsp/ref/matrix1norm.html

    Arguments:
    a :: ndarray(N x N) - The matrix to compute the one norm of.
    
    Returns:
    one_norm_a :: float - The one norm of a.
    """
    return anp.max(anp.sum(anp.abs(a), axis=0)) 
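
The induced one-norm is the maximum absolute column sum, so the result can be checked against numpy's built-in matrix norm (a quick sketch, not part of the qoc source):

import autograd.numpy as anp

a = anp.array([[1.0, -2.0],
               [3.0,  4.0]])
print(one_norm(a))            # 6.0 (the column sums are 4 and 6)
print(anp.linalg.norm(a, 1))  # 6.0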
Example 12
Project: ReducedVarianceReparamGradients   Author: andymiller   File: opt.py    MIT License 5 votes
def plot_gradients(self, dims=[1, 10, 50]):
        import matplotlib.pyplot as plt
        import seaborn as sns; sns.set_style("white")
        noisy_grads    = np.array(self.grad_trace)
        true_grads     = np.array(self.true_grad_trace)
        filt_grads     = np.array([g[0] for g in self.filtered_grad_trace])
        filt_grads_var = np.array([g[1] for g in self.filtered_grad_trace])
        tgrid = np.arange(true_grads.shape[0])
        fig, axarr = plt.subplots(len(dims), 1, figsize=(12, 3*len(dims)))
        for d, ax in zip(dims, axarr.flatten()):
            ax.plot(tgrid, true_grads[:,d], label="true")
            ax.plot(tgrid, filt_grads[:,d], label="filtered")
            ax.fill_between(tgrid, filt_grads[:,d]-2*np.sqrt(filt_grads_var[:,d]),
                                   filt_grads[:,d]+2*np.sqrt(filt_grads_var[:,d]),
                                   alpha = .25)
            ax.scatter(tgrid, noisy_grads[:,d], s=3)
            #yrange = (np.max([filt_grads[:,d], true_grads[:,d]]),
            #          np.min([filt_grads[:,d], true_grads[:,d]]))
            #ax.set_ylim(yrange)
            ax.set_xlim((tgrid[0], tgrid[-1]))
        ax.legend()
        print "Adam average grad deviation:", \
            np.sqrt(np.mean((filt_grads - true_grads)**2))
        print "sample average deviation: ", \
            np.sqrt(np.mean((noisy_grads - true_grads)**2))
        return fig, axarr 
Example 13
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0 5 votes
def __init__(self, sampled_pops, conf_arr, sampled_n=None,
                 ascertainment_pop=None):
        """Use build_config_list() instead of calling this constructor directly"""
        # If sampled_n=None, ConfigList.sampled_n will be the max number of
        # observed individuals/alleles per population.
        self.sampled_pops = tuple(sampled_pops)
        self.value = conf_arr

        if ascertainment_pop is None:
            ascertainment_pop = [True] * len(sampled_pops)
        self.ascertainment_pop = np.array(ascertainment_pop)
        self.ascertainment_pop.setflags(write=False)
        if all(not a for a in self.ascertainment_pop):
            raise ValueError(
                "At least one of the populations must be used for "
                "ascertainment of polymorphic sites")

        max_n = np.max(np.sum(self.value, axis=2), axis=0)

        if sampled_n is None:
            sampled_n = max_n
        sampled_n = np.array(sampled_n)
        if np.any(sampled_n < max_n):
            raise ValueError("config greater than sampled_n")
        self.sampled_n = sampled_n
        if not np.sum(sampled_n[self.ascertainment_pop]) >= 2:
            raise ValueError("The total sample size of the ascertainment "
                             "populations must be >= 2")

        config_sampled_n = np.sum(self.value, axis=2)
        self.has_missing_data = np.any(config_sampled_n != self.sampled_n)

        if np.any(np.sum(self.value[:, self.ascertainment_pop, :], axis=1)
                  == 0):
            raise ValueError("Monomorphic sites not allowed. In addition, all"
                             " sites must be polymorphic when restricted to"
                             " the ascertainment populations") 
Example 14
Project: momi2   Author: popgenmethods   File: test_inference.py    GNU General Public License v3.0 5 votes
def test_underflow_robustness(folded):
    num_runs = 1000
    sampled_pops = (1, 2, 3)
    sampled_n = (5, 5, 5)

    n_bases = int(1e3)
    demo = momi.DemographicModel(1.0, .25, muts_per_gen=2.5 / n_bases)
    for p in sampled_pops:
        demo.add_leaf(p)
    demo.add_time_param("t0")
    demo.add_time_param("t1", lower_constraints=["t0"])
    demo.move_lineages(1, 2, "t0")
    demo.move_lineages(2, 3, "t1")

    true_params = np.array([0.5, 0.7])
    demo.set_params(true_params)

    data = demo.simulate_data(
        length=n_bases,
        recoms_per_gen=0.0,
        num_replicates=num_runs,
        sampled_n_dict=dict(zip(sampled_pops, sampled_n)))

    sfs = data.extract_sfs(1)
    if folded:
        sfs = sfs.fold()

    demo.set_data(sfs)
    demo.set_params({"t0": 0.1, "t1": 100.0})
    optimize_res = demo.optimize()

    print(optimize_res)
    inferred_params = np.array(list(demo.get_params().values()))

    error = (true_params - inferred_params) / true_params
    print("# Truth:\n", true_params)
    print("# Inferred:\n", inferred_params)
    print("# Max Relative Error: %f" % max(abs(error)))
    print("# Relative Error:", "\n", error)

    assert max(abs(error)) < .1 
Example 15
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License 5 votes
def simulate_null_dist(eigs, J, n_simulate=2000, seed=7):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix of the U-statistic. The simulated statistic is n*UME^2 where
        UME is an unbiased estimator.

        - eigs: a numpy array of estimated eigenvalues of the covariance
          matrix. eigs is of length J
        - J: the number of test locations.

        Return a numpy array of simulated statistics.
        """
        # draw at most  J x block_size values at a time
        block_size = max(20, int(old_div(1000.0,J)))
        umes = np.zeros(n_simulate)
        from_ind = 0
        with util.NumpySeedContext(seed=seed):
            while from_ind < n_simulate:
                to_draw = min(block_size, n_simulate-from_ind)
                # draw chi^2 random variables.
                chi2 = np.random.randn(J, to_draw)**2

                # an array of length to_draw
                sim_umes = np.dot(eigs, chi2-1.0)

                # store
                end_ind = from_ind+to_draw
                umes[from_ind:end_ind] = sim_umes
                from_ind = end_ind
        return umes 
Example 16
Project: SyntheticStatistics   Author: BlissChapman   File: plot.py    MIT License 5 votes
def plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):
    results = glo.ex_load_result(ex, fname)
    value_accessor = lambda job_results: job_results['time_secs']
    vf_pval = np.vectorize(value_accessor)
    # results['test_results'] is a dictionary: 
    # {'test_result': (dict from running perform_test(te) '...':..., }
    times = vf_pval(results['test_results'])
    repeats, _, n_methods = results['test_results'].shape
    time_avg = np.mean(times, axis=0)
    time_std = np.std(times, axis=0)

    xvalues = func_xvalues(results)

    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    line_styles = exglo.func_plot_fmt_map()
    method_labels = exglo.get_func2label_map()
    
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):    
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,
                label=method_label)
            
    ylabel = 'Time (s)'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.gca().set_yscale('log')
    plt.xlim([np.min(xvalues), np.max(xvalues)])
    plt.xticks( xvalues, xvalues)
    plt.legend(loc='best')
    title = '%s. %d trials. '%( results['prob_label'],
            repeats ) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results 
Example 17
Project: samoo   Author: proteekroy   File: cdtlz_problems_main.py    MIT License 5 votes
def _calc_pareto_front(self, ref_dirs, *args, **kwargs):

        F = self.dtlz.pareto_front(ref_dirs, *args, **kwargs)

        a = np.sqrt(np.sum(F ** 2, 1) - 3 / 4 * np.max(F ** 2, axis=1))
        a = np.expand_dims(a, axis=1)
        a = np.tile(a, [1, ref_dirs.shape[1]])
        F = F/a
        # F = F / np.tile((np.sqrt(np.sum(F**2, 2) - 3 / 4 * np.max(F**2, axis=1))), [1, ref_dirs.shape[0]])
        return F 
Example 18
Project: vittles   Author: rgiordan   File: sensitivity_lib.py    Apache License 2.0 5 votes
def _check_hyper_par_value(self, hyper_par, tolerance=1e-8):
        if np.max(np.abs(hyper_par - self._hyper0)) > tolerance:
            raise ValueError(
                'get_opt_par must be evaluated at ',
                self._hyper0, ' != ', hyper_par) 
Example 19
Project: vittles   Author: rgiordan   File: sensitivity_lib.py    Apache License 2.0 5 votes
def _check_location(self, x1, x2, tol=1e-8):
        if (np.max(np.abs(x1 - self._x1)) > tol) or \
            (np.max(np.abs(x2 - self._x2)) > tol):
            raise ValueError(
                'You must use the x1 and x2 set in `set_evaluation_location`.') 
Example 20
Project: ceviche   Author: fancompute   File: optimize_accelerator.py    MIT License 5 votes
def Emax(Ex, Ey, eps_r):
    E_mag = npa.sqrt(npa.square(npa.abs(Ex)) + npa.square(npa.abs(Ey)))
    material_density = (eps_r - 1) / (eps_max - 1)
    return npa.max(E_mag * material_density)

# average electric field magnitude in the domain 
Example 21
Project: kernel-gof   Author: wittawatj   File: util.py    MIT License 5 votes
def constrain(val, min_val, max_val):
    return min(max_val, max(min_val, val)) 
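
A one-line scalar clamp: constrain(5, 0, 3) returns 3 and constrain(-1, 0, 3) returns 0. It relies on Python's built-in min and max rather than autograd.numpy.max, which is fine for the plain scalars it is applied to.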
Example 22
Project: kernel-gof   Author: wittawatj   File: plot.py    MIT License 5 votes
def plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):
    results = glo.ex_load_result(ex, fname)
    value_accessor = lambda job_results: job_results['time_secs']
    vf_pval = np.vectorize(value_accessor)
    # results['job_results'] is a dictionary: 
    # {'test_result': (dict from running perform_test(te) '...':..., }
    times = vf_pval(results['job_results'])
    repeats, _, n_methods = results['job_results'].shape
    time_avg = np.mean(times, axis=0)
    time_std = np.std(times, axis=0)

    xvalues = func_xvalues(results)

    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    line_styles = func_plot_fmt_map()
    method_labels = get_func2label_map()
    
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):    
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,
                label=method_label)
            
    ylabel = 'Time (s)'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.xlim([np.min(xvalues), np.max(xvalues)])
    plt.xticks( xvalues, xvalues )
    plt.legend(loc='best')
    plt.gca().set_yscale('log')
    title = '%s. %d trials. '%( results['prob_label'],
            repeats ) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results 
Example 23
Project: kernel-gof   Author: wittawatj   File: goftest.py    MIT License 5 votes
def simulate_null_dist(eigs, J, n_simulate=2000, seed=7):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix of the U-statistic. The simulated statistic is n*FSSD^2 where
        FSSD is an unbiased estimator.

        - eigs: a numpy array of estimated eigenvalues of the covariance
          matrix. eigs is of length d*J, where d is the input dimension.
        - J: the number of test locations.

        Return a numpy array of simulated statistics.
        """
        d = old_div(len(eigs),J)
        assert d>0
        # draw at most d x J x block_size values at a time
        block_size = max(20, int(old_div(1000.0,(d*J))))
        fssds = np.zeros(n_simulate)
        from_ind = 0
        with util.NumpySeedContext(seed=seed):
            while from_ind < n_simulate:
                to_draw = min(block_size, n_simulate-from_ind)
                # draw chi^2 random variables. 
                chi2 = np.random.randn(d*J, to_draw)**2

                # an array of length to_draw 
                sim_fssds = eigs.dot(chi2-1.0)
                # store 
                end_ind = from_ind+to_draw
                fssds[from_ind:end_ind] = sim_fssds
                from_ind = end_ind
        return fssds 
Example 24
Project: kernel-gof   Author: wittawatj   File: goftest.py    MIT License 5 votes
def optimize_auto_init(p, dat, J, **ops):
        """
        Optimize parameters by calling optimize_locs_widths(). Automatically 
        initialize the test locations and the Gaussian width.

        Return optimized locations, Gaussian width, optimization info
        """
        assert J>0
        # Use grid search to initialize the gwidth
        X = dat.data()
        n_gwidth_cand = 5
        gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
        med2 = util.meddistance(X, 1000)**2

        k = kernel.KGauss(med2*2)
        # fit a Gaussian to the data and draw to initialize V0
        V0 = util.fit_gaussian_draw(X, J, seed=829, reg=1e-6)
        list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
        besti, objs = GaussFSSD.grid_search_gwidth(p, dat, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
        assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
        logging.info('After grid search, gwidth=%.3g'%gwidth)

        
        V_opt, gwidth_opt, info = GaussFSSD.optimize_locs_widths(p, dat,
                gwidth, V0, **ops) 

        # set the width bounds
        #fac_min = 5e-2
        #fac_max = 5e3
        #gwidth_lb = fac_min*med2
        #gwidth_ub = fac_max*med2
        #gwidth_opt = max(gwidth_lb, min(gwidth_opt, gwidth_ub))
        return V_opt, gwidth_opt, info 
Example 25
Project: paragami   Author: rgiordan   File: simplex_patterns.py    Apache License 2.0 5 votes
def logsumexp(mat, axis):
    """Calculate logsumexp along the axis ``axis`` without dropping the axis.
    """
    if not type(axis) is int:
        raise ValueError(
            'This version of logsumexp is designed for only one axis.')
    mat_max = np.max(mat, axis=axis, keepdims=True)
    exp_mat_norm = np.exp(mat - mat_max)
    return np.log(np.sum(exp_mat_norm, axis=axis, keepdims=True)) + mat_max 
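
Subtracting the per-axis maximum before exponentiating is what makes this stable: every entry of mat - mat_max is at most 0, so np.exp cannot overflow, and the shift is added back on the log scale. A quick check (a sketch, not from paragami's tests):

import autograd.numpy as np

mat = np.array([[1000.0, 1001.0, 1002.0]])
print(logsumexp(mat, axis=1))       # [[1002.40760596]]
print(np.log(np.sum(np.exp(mat))))  # inf -- the naive form overflows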
Example 26
Project: paragami   Author: rgiordan   File: test_optimization_lib.py    Apache License 2.0 5 votes
def _test_matrix_sqrt(self, mat):
        dim = mat.shape[0]
        id_mat = np.eye(mat.shape[0])
        eig_vals = np.linalg.eigvals(mat)
        ev_min = np.min(eig_vals)
        ev_max = np.max(eig_vals)
        ev0 = np.real(ev_min + (ev_max - ev_min) / 3)
        ev1 = np.real(ev_min + 2 * (ev_max - ev_min) / 3)

        for test_ev_min in [None, ev0]:
            for test_ev_max in [None, ev1]:
                h_sqrt_mult, h_inv_sqrt_mult = \
                    paragami.optimization_lib._get_sym_matrix_inv_sqrt_funcs(
                        mat, ev_min=test_ev_min, ev_max=test_ev_max)
                h_inv_sqrt = _get_matrix_from_operator(h_inv_sqrt_mult, dim)
                h_sqrt = _get_matrix_from_operator(h_sqrt_mult, dim)

                assert_array_almost_equal(id_mat, h_inv_sqrt @ h_sqrt)
                h = h_sqrt @ h_sqrt.T
                assert_array_almost_equal(
                    id_mat, h_inv_sqrt @ h @ h_inv_sqrt.T)
                eig_vals_test = np.linalg.eigvals(h)
                eig_vals_test = np.array([np.real(v) for v in eig_vals_test])
                if test_ev_min is not None:
                    self.assertTrue(np.min(eig_vals_test) >=
                                    test_ev_min - 1e-8)
                else:
                    assert_array_almost_equal(ev_min, np.min(eig_vals_test))
                if test_ev_max is not None:
                    self.assertTrue(np.max(eig_vals_test) <=
                                    test_ev_max + 1e-8)
                else:
                    assert_array_almost_equal(ev_max, np.max(eig_vals_test)) 
Example 27
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0 4 votes
def RBF_p_test(mu, sigma, mu_train, sigma_train, length_scale, std):

    if (numpy.max(sigma_train) - numpy.min(sigma_train)) == 0:
        sigma_train = sigma_train * 0
        sigma = sigma * 0

    n_train = numpy.shape(mu_train)[0]

    n_test = numpy.shape(mu)[0]

    sigma2 = sigma ** 2

    sigma2_train = sigma_train ** 2

    mu_1 = mu_train.repeat(n_test, axis=1)

    mu_1 = numpy.vstack([mu_1, mu.transpose()])

    sigma_1 = sigma_train.repeat(n_test, axis=1)

    sigma_1 = numpy.vstack([sigma_1, sigma.transpose()])

    sigma2_1 = sigma2_train.repeat(n_test, axis=1)

    sigma2_1 = numpy.vstack([sigma2_1, sigma2.transpose()])  # stack the squared test sigmas (the upstream code stacked sigma here, apparently a typo)

    mu_2 = mu.transpose().repeat(n_train + 1, axis=0)

    sigma_2 = sigma.transpose().repeat(n_train + 1, axis=0)

    sigma2_2 = sigma2.transpose().repeat(n_train + 1, axis=0)

    S = (sigma2_1 + sigma2_2 + (length_scale ** 2))

    K = length_scale * (S ** -0.5) * numpy.exp(-0.5 * ((mu_1 - mu_2) ** 2) / S) * (std ** 2)

    length_scale_g = (std ** 2 * numpy.exp(-(mu_1 - mu_2) ** 2 / (2 * (length_scale ** 2 + sigma2_1 + sigma2_2))) *
                      (length_scale ** 2 * mu_1 ** 2 - 2 * length_scale ** 2 * mu_1 * mu_2 +
                       length_scale ** 2 * mu_2 ** 2 + length_scale ** 2 * sigma2_1 + length_scale ** 2 * sigma2_2 +
                       sigma2_1 ** 2 + 2 * sigma2_1 * sigma2_2 + sigma2_2 ** 2)) / (length_scale ** 2
                                                                                    + sigma2_1 + sigma2_2) ** (5 / 2)

    std_g = 2 * K / std

    mu_g = -(length_scale * std ** 2 * numpy.exp(
        -(mu_1 - mu_2) ** 2 / (2 * (length_scale ** 2 + sigma2_1 + sigma2_2))) *
             (2 * mu_1 - 2 * mu_2)) / (2 * (length_scale ** 2 + sigma2_1 + sigma2_2) ** (3 / 2))

    sigma_g = -(length_scale * sigma_1 * std ** 2 * numpy.exp(
        -(mu_1 - mu_2) ** 2 / (2 * (length_scale ** 2 + sigma_1 ** 2 + sigma_2 ** 2)))
                * (length_scale ** 2 - mu_1 ** 2 + 2 * mu_1 * mu_2 - mu_2 ** 2 + sigma_1 ** 2 + sigma_2 ** 2)) / (
                          length_scale ** 2 +
                          sigma_1 ** 2 +
                          sigma_2 ** 2) ** (5 / 2)

    if (numpy.max(sigma) - numpy.min(sigma)) == 0:
        sigma_g = sigma_g * 0

    return K, length_scale_g, std_g, mu_g, sigma_g 
Example 28
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0 4 votes
def RBF_p(mu, sigma, length_scale, std):
    # mu and sigma are column vector

    if (numpy.max(sigma) - numpy.min(sigma)) == 0:
        sigma = sigma * 0

    sigma2 = sigma ** 2

    n = numpy.shape(mu)[0]

    mu_1 = mu.repeat(n, axis=1)

    sigma_1 = sigma.repeat(n, axis=1)

    sigma2_1 = sigma2.repeat(n, axis=1)

    mu_2 = mu.transpose().repeat(n, axis=0)

    sigma2_2 = sigma2.transpose().repeat(n, axis=0)

    sigma_2 = sigma.transpose().repeat(n, axis=0)

    S = (sigma2_1 + sigma2_2 + (length_scale ** 2))

    K = length_scale * (S ** -0.5) * numpy.exp(-0.5 * ((mu_1 - mu_2) ** 2) / S) * (std ** 2)

    length_scale_g = (std ** 2 * numpy.exp(-(mu_1 - mu_2) ** 2 / (2 * (length_scale ** 2 + sigma2_1 + sigma2_2))) *
                      (length_scale ** 2 * mu_1 ** 2 - 2 * length_scale ** 2 * mu_1 * mu_2 +
                       length_scale ** 2 * mu_2 ** 2 + length_scale ** 2 * sigma2_1 + length_scale ** 2 * sigma2_2 +
                       sigma2_1 ** 2 + 2 * sigma2_1 * sigma2_2 + sigma2_2 ** 2)) / (length_scale ** 2
                                                                                    + sigma2_1 + sigma2_2) ** (5 / 2)

    std_g = 2 * K / std

    mu_g = -(length_scale*std**2*numpy.exp(-(mu_1 - mu_2)**2/(2*(length_scale**2 + sigma2_1 + sigma2_2))) * \
             (2*mu_1 - 2*mu_2))/(2*(length_scale**2 + sigma2_1 + sigma2_2)**(3/2))

    sigma_g = -(length_scale*sigma_1*std**2*numpy.exp(-(mu_1 - mu_2)**2/(2*(length_scale**2 + sigma_1**2 + sigma_2**2)))
                * (length_scale**2 - mu_1**2 + 2*mu_1*mu_2 - mu_2**2 + sigma_1**2 + sigma_2**2))/(length_scale**2 +
                                                                                                  sigma_1**2 +
                                                                                                  sigma_2**2)**(5/2)

    if (numpy.max(sigma) - numpy.min(sigma)) == 0:
        sigma_g = sigma_g * 0

    return K, length_scale_g, std_g, mu_g, sigma_g 
Example 29
Project: qoc   Author: SchusterLab   File: expm.py    MIT License 4 votes
def expm_pade(a):
    """
    Compute the matrix exponential via pade approximation.

    References:
    [0] http://eprints.ma.man.ac.uk/634/1/high05e.pdf
    [1] https://github.com/scipy/scipy/blob/v0.14.0/scipy/linalg/_expm_frechet.py#L10

    Arguments:
    a :: ndarray(N x N) - The matrix to exponentiate.
    
    Returns:
    expm_a :: ndarray(N x N) - The exponential of a.
    """
    # If the one norm is sufficiently small,
    # pade orders up to 13 are well behaved.
    scale = 0
    size = a.shape[0]
    pade_order = None
    one_norm_ = one_norm(a)
    for pade_order_ in PADE_ORDERS:
        if one_norm_ < THETA[pade_order_]:
            pade_order = pade_order_
        #ENDIF
    #ENDFOR

    # If the one norm is large, scaling and squaring
    # is required.
    if pade_order is None:
        pade_order = 13
        scale = max(0, int(anp.ceil(anp.log2(one_norm_ / THETA[13]))))
        a = a * (2 ** -scale)

    # Execute pade approximant.
    i = anp.eye(size)
    u, v = PADE[pade_order](a, i)
    r = anp.linalg.solve(-u + v, u + v)

    # Do squaring if necessary.
    for _ in range(scale):
        r = anp.matmul(r, r)

    return r


### EXPM IMPLEMENTATION VIA EIGEN DECOMPOSITION AND DIAGONALIZATION ### 
Example 30
Project: momi2   Author: popgenmethods   File: demography.py    GNU General Public License v3.0 4 votes
def admixture_operator(n_node, p):
    # axis0=n_from_parent, axis1=der_from_parent, axis2=der_in_parent
    der_in_parent = np.tile(np.arange(n_node + 1), (n_node + 1, n_node + 1, 1))
    n_from_parent = np.transpose(der_in_parent, [2, 0, 1])
    der_from_parent = np.transpose(der_in_parent, [0, 2, 1])

    anc_in_parent = n_node - der_in_parent
    anc_from_parent = n_from_parent - der_from_parent

    x = comb(der_in_parent, der_from_parent) * comb(
        anc_in_parent, anc_from_parent) / comb(n_node, n_from_parent)
    # rearrange so axis0=1, axis1=der_in_parent, axis2=der_from_parent, axis3=n_from_parent
    x = np.transpose(x)
    x = np.reshape(x, [1] + list(x.shape))

    n = np.arange(n_node+1)
    B = comb(n_node, n)

    # the two arrays to convolve_sum_axes
    x1 = (x * B * ((1-p)**n) * (p**(n[::-1])))
    x2 = x[:, :, :, ::-1]

    # reduce array size; approximate low probability events with 0
    mu = n_node * (1-p)
    sigma = np.sqrt(n_node * p * (1-p))
    n_sd = 4
    lower = np.max((0, np.floor(mu - n_sd*sigma)))
    upper = np.min((n_node, np.ceil(mu + n_sd*sigma))) + 1
    lower, upper = int(lower), int(upper)

    ##x1 = x1[:,:,:upper,lower:upper]
    ##x2 = x2[:,:,:(n_node-lower+1),lower:upper]

    ret = convolve_sum_axes(x1, x2)
    # axis0=der_in_parent1, axis1=der_in_parent2, axis2=der_in_child
    ret = np.reshape(ret, ret.shape[1:])
    if ret.shape[2] < (n_node+1):
        ret = np.pad(ret, [(0,0),(0,0),(0,n_node+1-ret.shape[2])], "constant")
    return ret[:, :, :(n_node+1)]


#@memoize
#def _der_in_admixture_node(n_node):
#    '''
#    returns 4d-array, [n_from_parent1, der_in_child, der_in_parent1, der_in_parent2].
#    Used by Demography._admixture_prob_helper
#    '''
#    # axis0=n_from_parent, axis1=der_from_parent, axis2=der_in_parent
#    der_in_parent = np.tile(np.arange(n_node + 1), (n_node + 1, n_node + 1, 1))
#    n_from_parent = np.transpose(der_in_parent, [2, 0, 1])
#    der_from_parent = np.transpose(der_in_parent, [0, 2, 1])
#
#    anc_in_parent = n_node - der_in_parent
#    anc_from_parent = n_from_parent - der_from_parent
#
#    x = scipy.special.comb(der_in_parent, der_from_parent) * scipy.special.comb(
#        anc_in_parent, anc_from_parent) / scipy.special.comb(n_node, n_from_parent)
#
#    ret, labels = convolve_axes(
#        x, x[::-1, ...], [[c for c in 'ijk'], [c for c in 'ilm']], ['j', 'l'], 'n')
#    return np.einsum('%s->inkm' % ''.join(labels), ret[..., :(n_node + 1)]) 
Example 31
Project: momi2   Author: popgenmethods   File: test_inference.py    GNU General Public License v3.0 4 votes
def test_archaic_and_pairwisediffs():
    #logging.basicConfig(level=logging.DEBUG)
    theta = 1
    N_e = 1.0
    join_time = 1.0
    num_runs = 1000

    def logit(p):
        return np.log(p / (1. - p))

    def expit(x):
        return 1. / (1. + np.exp(-x))

    n_bases = 1000
    model = momi.DemographicModel(
        N_e, muts_per_gen=theta/4./N_e/n_bases)

    model.add_time_param(
        "sample_t", random.uniform(0.001, join_time-.001) / join_time,
        upper=join_time)
    model.add_size_param("N", 1.0)
    model.add_leaf("a", N="N")
    model.add_leaf("b", t="sample_t", N="N")
    model.move_lineages("a", "b", join_time)

    data = model.simulate_data(length=n_bases,
                               recoms_per_gen=0,
                               num_replicates=num_runs,
                               sampled_n_dict={"a": 2, "b": 2})

    model.set_data(data.extract_sfs(1),
                   use_pairwise_diffs=False,
                   mem_chunk_size=-1)

    true_params = np.array(list(model.get_params().values()))
    #model.set_x([logit(random.uniform(.001, join_time-.001) / join_time),
    model.set_params([
        logit(random.uniform(.001, join_time-.001) / join_time),
        random.uniform(-1, 1)],
                     scaled=True)
    res = model.optimize(method="trust-ncg", hessp=True)
    inferred_params = np.array(list(model.get_params().values()))

    assert np.max(np.abs(np.log(true_params / inferred_params))) < .2 
Example 32
Project: variational-smc   Author: blei-lab   File: variational_smc.py    MIT License 4 votes
def sim_q(prop_params, model_params, y, smc_obj, rs, verbose=False):
    """
    Simulates a single sample from the VSMC approximation.

    Requires an SMC object with 2 member functions:
    -- sim_prop(t, x_{t-1}, y, prop_params, model_params, rs)
    -- log_weights(t, x_t, x_{t-1}, y, prop_params, model_params)
    """
    # Extract constants
    T = y.shape[0]
    Dx = smc_obj.Dx
    N = smc_obj.N

    # Initialize SMC
    X = np.zeros((N,T,Dx))
    logW = np.zeros(N)
    W = np.zeros((N,T))
    ESS = np.zeros(T)

    for t in range(T):
        # Resampling
        if t > 0:
            ancestors = resampling(W[:,t-1], rs)
            X[:,:t,:] = X[ancestors,:t,:]

        # Propagation
        X[:,t,:] = smc_obj.sim_prop(t, X[:,t-1,:], y, prop_params, model_params, rs)

        # Weighting
        logW = smc_obj.log_weights(t, X[:,t,:], X[:,t-1,:], y, prop_params, model_params)
        max_logW = np.max(logW)
        W[:,t] = np.exp(logW-max_logW)
        W[:,t] /= np.sum(W[:,t])
        ESS[t] = 1./np.sum(W[:,t]**2)

    # Sample from the empirical approximation
    bins = np.cumsum(W[:,-1])
    u = rs.rand()
    B = np.digitize(u,bins)

    if verbose:
        print('Mean ESS', np.mean(ESS)/N)
        print('Min ESS', np.min(ESS))
        
    return X[B,:,:]
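
The max_logW subtraction in the weighting step is the same stabilization trick as in the logsumexp example above: it changes nothing once W[:,t] is divided by its sum, but it keeps np.exp from overflowing, or from underflowing to all zeros when every log-weight is very negative. With logW = [-1000, -1001], for instance, np.exp(logW) underflows to [0, 0] and the normalization divides by zero, while np.exp(logW - max_logW) = [1, e**-1] normalizes correctly.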