Python autograd.numpy.linspace() Examples

The following are code examples showing how to use autograd.numpy.linspace(). They are drawn from open source Python projects.
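
As a quick orientation, autograd.numpy.linspace() is a drop-in replacement for numpy.linspace() whose output autograd can differentiate through. Here is a minimal sketch (the function and values are illustrative, not taken from any project below):

import autograd.numpy as np
from autograd import grad

def mean_squared(scale):
    # Build a fixed grid and compute a scalar that depends on `scale`.
    x = np.linspace(0.0, 1.0, 11)
    return np.mean((scale * x) ** 2)

print(mean_squared(2.0))        # forward evaluation: 1.4
print(grad(mean_squared)(2.0))  # d/d(scale) of mean(scale**2 * x**2) = 2 * scale * mean(x**2) = 1.4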

Example 1
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def prior_error(mu_shift, w, n_u):

    a = -numpy.abs(w[:, numpy.arange(0, n_u)*3] + mu_shift[1])

    b = numpy.abs(w[:, numpy.arange(0, n_u)*3+1] + mu_shift[3])

    c = w[:, numpy.arange(0, n_u)*3+2] + mu_shift[5]

    q = numpy.linspace(1e-8, 1 - 1e-8, 128)

    # q = q.ravel()

    q_hat = numpy.mean(1 / (1 + numpy.exp(numpy.log(q)[None, None, :] * a[:, :, None] +
                                          numpy.log(1 - q)[None, None, :] * b[:, :, None] +
                                          c[:, :, None])), axis=0)

    return numpy.mean((q - q_hat) ** 2) 
Example 2
Project: kernel-activation-functions   Author: ispamm   File: kafnets.py    MIT License
def init_kaf_nn(layer_sizes, scale=0.01, rs=np.random.RandomState(0), dict_size=20, boundary=3.0):
    """ 
    Initialize the parameters of a KAF feedforward network.
        - dict_size: the size of the dictionary for every neuron.
        - boundary: the boundary for the activation functions.
    """
    
    # Initialize the dictionary
    D = np.linspace(-boundary, boundary, dict_size).reshape(-1, 1)
    
    # Rule of thumb for gamma
    interval = D[1, 0] - D[0, 0]
    gamma = 0.5/np.square(2*interval)
    D = D.reshape(1, 1, -1)
    
    # Initialize a list of parameters for the layer
    w = [(rs.randn(insize, outsize) * scale,         # Weight matrix
          rs.randn(outsize) * scale,                 # Bias vector
          rs.randn(1, outsize, dict_size) * 0.5)     # Mixing coefficients
         for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]
    
    return w, (D, gamma) 
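
A hedged usage sketch for the initializer above (the layer sizes are illustrative, and the snippet's import of autograd.numpy as np is assumed):

w, (D, gamma) = init_kaf_nn([2, 10, 1])
print(len(w))   # 2 parameter tuples, one per layer
print(D.shape)  # (1, 1, 20): the shared dictionary of kernel centers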
Example 3
Project: differentiable-atomistic-potentials   Author: google   File: test_ag.py    Apache License 2.0
def test0(self):
    """Check the fcc cell neighbors in a variety of repeats."""
    a = 3.6
    for cutoff_radius in np.linspace(a / 2, 5 * a, 10):
      for rep in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2),
                  (2, 1, 1), (2, 2, 1), (2, 2, 2), (1, 2, 3), (4, 1, 1)):
        atoms = bulk('Cu', 'fcc', a=a).repeat(rep)

        nl = NeighborList(
            [cutoff_radius / 2] * len(atoms),
            skin=0.01,
            self_interaction=False,
            bothways=True)
        nl.update(atoms)
        nns_ase = [len(nl.get_neighbors(i)[0]) for i in range(len(atoms))]

        d, _ = get_distances(atoms.positions, atoms.cell, cutoff_radius)
        inds = (d <= (cutoff_radius + 0.01)) & (d > 0.00)
        nns = inds.sum((1, 2))

        self.assertTrue(np.all(nns_ase == nns)) 
Example 4
Project: eye_hand_calibration   Author: MobileManipulation   File: full_calib.py    BSD 3-Clause "New" or "Revised" License
def __init__(self, grid_size, extrinsics, intrinsics):
        self.extrinsics = extrinsics
        self.intrinsics = intrinsics

        # Build up testing data in a grid
        z = np.linspace(-0.2, 0.2, num=grid_size)
        y = np.linspace(-0.2+0.025, 0.2+0.025, num=grid_size)
        x = np.linspace(0.5, 1.2, num=grid_size)
        g = np.meshgrid(x, y, z)
        cam = np.stack(list(map(np.ravel, g)))  # list() so np.stack gets a sequence under Python 3
        # Append a homogeneous-coordinate row (assign the result so it takes effect)
        cam = np.concatenate([cam, np.ones([1, cam.shape[1]])])

        # Compute all truth data in various frames
        self.camera_truth = cam
        self.optical_truth = opt = toCameraFrame(cam, extrinsics)
        self.pixel_truth = cameraProjection(opt, intrinsics) 
Example 5
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def create_pf():
    ps = np.linspace(-1/np.sqrt(2),1/np.sqrt(2))
    pf = []
    
    for x1 in ps:
        #generate solutions on the Pareto front:
        x = np.array([x1,x1])
        
        f, f_dx = concave_fun_eval(x)
        pf.append(f)
            
    pf = np.array(pf)
    
    return pf

### optimization method ### 
Example 6
Project: paragami   Author: rgiordan   File: test_optimization_lib.py    Apache License 2.0
def test_truncate_eigenvalues(self):
        dim = 10
        evs = np.linspace(1.5, 10.5, dim)

        evs_trunc = truncate_eigenvalues(evs, None, None)
        assert_array_almost_equal(evs, evs_trunc)

        evs_trunc = truncate_eigenvalues(evs, 2.5, None)
        trunc = evs <= 2.5
        not_trunc = evs > 2.5
        assert_array_almost_equal(2.5, evs_trunc[trunc])
        assert_array_almost_equal(evs[not_trunc], evs_trunc[not_trunc])

        evs_trunc = truncate_eigenvalues(evs, None, 5.5)
        trunc = evs >= 5.5
        not_trunc = evs < 5.5
        assert_array_almost_equal(5.5, evs_trunc[trunc])
        assert_array_almost_equal(evs[not_trunc], evs_trunc[not_trunc]) 
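
The calls above imply that truncate_eigenvalues clamps an eigenvalue array into [ev_min, ev_max], where None disables a bound. A minimal sketch consistent with these tests (an assumption for illustration, not paragami's actual implementation):

import autograd.numpy as np

def truncate_eigenvalues_sketch(evs, ev_min=None, ev_max=None):
    # Clamp each eigenvalue into [ev_min, ev_max]; None leaves that side unbounded.
    evs = np.asarray(evs)
    if ev_min is not None:
        evs = np.maximum(evs, ev_min)
    if ev_max is not None:
        evs = np.minimum(evs, ev_max)
    return evs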
Example 7
Project: differentiable-atomistic-potentials   Author: google   File: test_ag.py    Apache License 2.0
def test0(self):
    """check one-way neighborlist for fcc on different repeats."""
    a = 3.6
    for rep in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2),
                (2, 1, 1), (2, 2, 1), (2, 2, 2), (1, 2, 3), (4, 1, 1)):
      for cutoff_radius in np.linspace(a / 2.1, 5 * a, 5):
        atoms = bulk('Cu', 'fcc', a=a).repeat(rep)
        # It is important to rattle the atoms off the lattice points.
        # Otherwise, float tolerances makes it hard to count correctly.
        atoms.rattle(0.02)
        nl = NeighborList(
            [cutoff_radius / 2] * len(atoms),
            skin=0.0,
            self_interaction=False,
            bothways=False)
        nl.update(atoms)

        neighbors, displacements = get_neighbors_oneway(
            atoms.positions, atoms.cell, cutoff_radius, skin=0.0)

        for i in range(len(atoms)):
          an, ad = nl.get_neighbors(i)
          # Check the same number of neighbors
          self.assertEqual(len(neighbors[i]), len(an))
          # Check the same indices
          self.assertCountEqual(neighbors[i], an)

          # I am not sure how to test for the displacements. 
Example 8
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def visual_test(self, tst_data):
        results = self.perform_test(tst_data)
        s = results['test_stat']
        pval = results['pvalue']
        J = self.test_locs.shape[0]
        domain = np.linspace(stats.chi2.ppf(0.001, J), stats.chi2.ppf(0.9999, J), 200)
        plt.plot(domain, stats.chi2.pdf(domain, J), label=r'$\chi^2$ (df=%d)' % J)
        plt.stem([s], [old_div(stats.chi2.pdf(J, J),2)], 'or-', label='test stat')
        plt.legend(loc='best', frameon=True)
        plt.title('%s. p-val: %.3g. stat: %.3g'%(type(self).__name__, pval, s))
        plt.show()

    #=============================== 
Example 9
Project: vittles   Author: rgiordan   File: test_utils.py    Apache License 2.0
def __init__(self, dim):
        # Put lower bounds so we're testing the constraining functions
        # and so that derivatives of all orders are nonzero.
        self.dim = dim
        self.theta_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-20.)
        self.lambda_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-20.0)

        vec = np.linspace(0.1, 0.3, num=dim)
        self.matrix = np.outer(vec, vec) + np.eye(dim) 
Example 10
Project: vittles   Author: rgiordan   File: test_utils.py    Apache License 2.0
def get_default_lambda(self):
        return np.linspace(0.5, 10.0, num=self.dim) 
Example 11
Project: Elephant   Author: 983   File: approximate_house.py    The Unlicense
def make_curve(parameters):
    # make a curve, given some fourier series coefficients
    
    t = np.linspace(0, 2*np.pi, len(house), endpoint=False)

    result = np.zeros(len(t))

    for k in range(len(parameters)//2):
        a = parameters[k]
        b = parameters[k + len(parameters)//2]
        
        result += a*np.cos(k*t) + b*np.sin(k*t)
    
    return result 
Example 12
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def circle_points(r, n):
    # generate evenly distributed preference vector
    circles = []
    for r, n in zip(r, n):
        t = np.linspace(0, 0.5 * np.pi, n)
        x = r * np.cos(t)
        y = r * np.sin(t)
        circles.append(np.c_[x, y])
    return circles
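
An illustrative call (the radius and count are assumptions, and the snippet's import of autograd.numpy as np is assumed):

prefs = circle_points([1.0], [5])[0]
print(prefs.shape)  # (5, 2): five (x, y) preference vectors on the quarter-circle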


### the synthetic multi-objective problem ### 
Example 13
Project: pylqr   Author: navigator8972   File: pylqr_trajctrl.py    GNU General Public License v3.0
def PyLQR_TrajCtrl_TrackingTest():
    n_pnts = 200
    x_coord = np.linspace(0.0, 2*np.pi, n_pnts)
    y_coord = np.sin(x_coord)
    #concatenate to have trajectory
    ref_traj = np.array([x_coord, y_coord]).T
    weight_mats = [ np.eye(ref_traj.shape[1])*100 ]

    #draw reference trajectory
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.hold(True)  # Axes.hold was removed in Matplotlib 3.0; figures now always "hold"
    ax.plot(ref_traj[:, 0], ref_traj[:, 1], '.-k', linewidth=3.5)
    ax.plot([ref_traj[0, 0]], [ref_traj[0, 1]], '*k', markersize=16)

    lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
    lqr_traj_ctrl.build_ilqr_tracking_solver(ref_traj, weight_mats)

    n_queries = 5

    for i in range(n_queries):
        #start from a perturbed point
        x0 = ref_traj[0, :] + np.random.rand(2) * 2 - 1
        syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0)
        #plot it
        ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)

    plt.show()
    return 
Example 14
Project: Stein-Density-Ratio-Estimation   Author: anewgithubname   File: data.py    MIT License
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations) 
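
A hedged usage sketch (the argument values are illustrative; the snippet's numpy and numpy.random imports are assumed):

X = make_pinwheel(radial_std=0.3, tangential_std=0.1,
                  num_classes=3, num_per_class=100, rate=0.25)
print(X.shape)  # (300, 2): three spiral arms of 100 points each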
Example 15
Project: kernel-gof   Author: wittawatj   File: ex2_prob_params.py    MIT License
def job_mmd_opt(p, data_source, tr, te, r):
    """
    MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
    Requires the ability to sample from p, i.e., the UnnormalizedDensity p
    has to return a non-None value from get_datasource().

    With optimization. Gaussian kernel.
    """
    data = tr + te
    X = data.data()
    with util.ContextTimer() as t:
        # median heuristic 
        pds = p.get_datasource()
        datY = pds.sample(data.sample_size(), seed=r+294)
        Y = datY.data()
        XY = np.vstack((X, Y))

        med = util.meddistance(XY, subsample=1000)

        # Construct a list of kernels to try based on multiples of the median
        # heuristic
        #list_gwidth = np.hstack( (np.linspace(20, 40, 10), (med**2)
        #    *(2.0**np.linspace(-2, 2, 20) ) ) )
        list_gwidth = (med**2)*(2.0**np.linspace(-3, 3, 30) ) 
        list_gwidth.sort()
        candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]

        mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r+56)
        mmd_result = mmd_opt.perform_test(data,
                candidate_kernels=candidate_kernels,
                tr_proportion=tr_proportion, reg=1e-3)
    return { 'test_result': mmd_result, 'time_secs': t.secs}


# Define our custom Job, which inherits from base class IndependentJob 
Example 16
Project: kernel-gof   Author: wittawatj   File: ex1_vary_n.py    MIT License
def job_mmd_opt(p, data_source, tr, te, r):
    """
    MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
    Requires the ability to sample from p, i.e., the UnnormalizedDensity p
    has to return a non-None value from get_datasource().

    With optimization. Gaussian kernel.
    """
    data = tr + te
    X = data.data()
    with util.ContextTimer() as t:
        # median heuristic 
        pds = p.get_datasource()
        datY = pds.sample(data.sample_size(), seed=r+294)
        Y = datY.data()
        XY = np.vstack((X, Y))

        med = util.meddistance(XY, subsample=1000)

        # Construct a list of kernels to try based on multiples of the median
        # heuristic
        #list_gwidth = np.hstack( (np.linspace(20, 40, 10), (med**2)
        #    *(2.0**np.linspace(-2, 2, 20) ) ) )
        list_gwidth = (med**2)*(2.0**np.linspace(-4, 4, 30) ) 
        list_gwidth.sort()
        candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]

        mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r)
        mmd_result = mmd_opt.perform_test(data,
                candidate_kernels=candidate_kernels,
                tr_proportion=tr_proportion, reg=1e-3)
    return { 'test_result': mmd_result, 'time_secs': t.secs} 
Example 17
Project: kernel-gof   Author: wittawatj   File: plot.py    MIT License
def box_meshgrid(func, xbound, ybound, nx=50, ny=50):
    """
    Form a meshed grid (to be used with a contour plot) on a box
    specified by xbound, ybound. Evaluate the grid with [func]: (n x 2) -> n.
    
    - xbound: a tuple (xmin, xmax)
    - ybound: a tuple (ymin, ymax)
    - nx: number of points to evaluate in the x direction
    - ny: number of points to evaluate in the y direction
    
    return XX, YY, ZZ where XX is a 2D nd-array of size ny x nx
    """
    
    # form a test location grid to try 
    minx, maxx = xbound
    miny, maxy = ybound
    loc0_cands = np.linspace(minx, maxx, nx)
    loc1_cands = np.linspace(miny, maxy, ny)
    lloc0, lloc1 = np.meshgrid(loc0_cands, loc1_cands)
    # nd1 x nd0 x 2
    loc3d = np.dstack((lloc0, lloc1))
    # #candidates x 2
    all_loc2s = np.reshape(loc3d, (-1, 2) )
    # evaluate the function
    func_grid = func(all_loc2s)
    func_grid = np.reshape(func_grid, (ny, nx))
    
    assert lloc0.shape[0] == ny
    assert lloc0.shape[1] == nx
    assert np.all(lloc0.shape == lloc1.shape)
    
    return lloc0, lloc1, func_grid 
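
A short usage sketch (the quadratic and the bounds are illustrative; the snippet's import of autograd.numpy as np is assumed):

f = lambda X: np.sum(X ** 2, axis=1)  # maps an (n, 2) array to (n,)
XX, YY, ZZ = box_meshgrid(f, (-2, 2), (-1, 1), nx=40, ny=30)
print(XX.shape, ZZ.shape)  # (30, 40) (30, 40)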
Example 18
Project: kernel-gof   Author: wittawatj   File: goftest.py    MIT License
def optimize_auto_init(p, dat, J, **ops):
        """
        Optimize parameters by calling optimize_locs_widths(). Automatically 
        initialize the test locations and the Gaussian width.

        Return optimized locations, Gaussian width, optimization info
        """
        assert J>0
        # Use grid search to initialize the gwidth
        X = dat.data()
        n_gwidth_cand = 5
        gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
        med2 = util.meddistance(X, 1000)**2

        k = kernel.KGauss(med2*2)
        # fit a Gaussian to the data and draw to initialize V0
        V0 = util.fit_gaussian_draw(X, J, seed=829, reg=1e-6)
        list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
        besti, objs = GaussFSSD.grid_search_gwidth(p, dat, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
        assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
        logging.info('After grid search, gwidth=%.3g'%gwidth)

        
        V_opt, gwidth_opt, info = GaussFSSD.optimize_locs_widths(p, dat,
                gwidth, V0, **ops) 

        # set the width bounds
        #fac_min = 5e-2
        #fac_max = 5e3
        #gwidth_lb = fac_min*med2
        #gwidth_ub = fac_max*med2
        #gwidth_opt = max(gwidth_lb, min(gwidth_opt, gwidth_ub))
        return V_opt, gwidth_opt, info 
Example 19
Project: kernel-gof   Author: wittawatj   File: goftest.py    MIT License
def power_criterion(p, dat, b, c, test_locs, reg=1e-2):
        k = kernel.KIMQ(b=b, c=c)
        return FSSD.power_criterion(p, dat, k, test_locs, reg)

Example 20
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_gammaln_functions(self):
        for x in np.linspace(2.5, 3.5, 10):
            check_grads(sp.special.digamma)(x)
            check_grads(sp.special.psi)(x)
            check_grads(sp.special.gamma)(x)
            check_grads(sp.special.gammaln)(x)
            check_grads(sp.special.rgamma)(x)
            for n in range(4):
                check_grads(lambda x: sp.special.polygamma(int(n), x))(x) 
Example 21
Project: paragami   Author: rgiordan   File: test_utils.py    Apache License 2.0
def __init__(self, dim):
        # Put lower bounds so we're testing the constraining functions
        # and so that derivatives of all orders are nonzero.
        self.dim = dim
        self.theta_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-10.)
        self.lambda_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-10.0)

        vec = np.linspace(0.1, 0.3, num=dim)
        self.matrix = np.outer(vec, vec) + np.eye(dim)

        self.lam = self.get_default_lambda() 
Example 22
Project: paragami   Author: rgiordan   File: test_utils.py    Apache License 2.0
def get_default_lambda(self):
        return np.linspace(0.5, 10.0, num=self.dim) 
Example 23
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def optimize_freqs_width(tst_data, alpha, n_test_freqs=10, max_iter=400,
            freqs_step_size=0.2, gwidth_step_size=0.01, batch_proportion=1.0,
            tol_fun=1e-3, seed=1):
        """Optimize the test frequencies and the Gaussian kernel width by
        maximizing the test power. X, Y should not be the same data as used
        in the actual test (i.e., should be a held-out set).

        - max_iter: #gradient descent iterations
        - batch_proportion: (0,1] value to be multiplied with nx giving the batch
            size in stochastic gradient. 1 = full gradient ascent.
        - tol_fun: termination tolerance of the objective value

        Return (test_freqs, gaussian_width, info)
        """
        J = n_test_freqs
        """
        Optimize the empirical version of Lambda(T) i.e., the criterion used
        to optimize the test locations, for the test based
        on difference of mean embeddings with Gaussian kernel.
        Also optimize the Gaussian width.

        :return a theano function T |-> Lambda(T)
        """
        d = tst_data.dim()
        # set the seed
        rand_state = np.random.get_state()
        np.random.seed(seed)

        # draw frequencies randomly from the standard Gaussian.
        # TODO: Can we do better?
        T0 = np.random.randn(J, d)
        # reset the seed back to the original
        np.random.set_state(rand_state)

        # grid search to determine the initial gwidth
        mean_sd = tst_data.mean_std()
        scales = 2.0**np.linspace(-4, 3, 20)
        list_gwidth = np.hstack( (mean_sd*scales*(d**0.5), 2**np.linspace(-4, 4, 20) ))
        list_gwidth.sort()
        besti, powers = SmoothCFTest.grid_search_gwidth(tst_data, T0,
                list_gwidth, alpha)
        # initialize with the best width from the grid search
        gwidth0 = list_gwidth[besti]
        assert util.is_real_num(gwidth0), 'gwidth0 not real. Was %s'%str(gwidth0)
        assert gwidth0 > 0, 'gwidth0 not positive. Was %.3g'%gwidth0

        func_z = SmoothCFTest.construct_z_theano
        # info = optimization info
        T, gamma, info = optimize_T_gaussian_width(tst_data, T0, gwidth0, func_z,
                max_iter=max_iter, T_step_size=freqs_step_size,
                gwidth_step_size=gwidth_step_size, batch_proportion=batch_proportion,
                tol_fun=tol_fun)
        assert util.is_real_num(gamma), 'gamma is not real. Was %s' % str(gamma)

        ninfo = {'test_freqs': info['Ts'], 'test_freqs0': info['T0'],
                'gwidths': info['gwidths'], 'obj_values': info['obj_values'],
                'gwidth0': gwidth0, 'gwidth0_powers': powers}
        return (T, gamma, ninfo)
Example 24
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def optimize_locs_width(tst_data, alpha, n_test_locs=10, max_iter=400,
            locs_step_size=0.1, gwidth_step_size=0.01, batch_proportion=1.0,
            tol_fun=1e-3, reg=1e-5, seed=1):
        """Optimize the test locations and the Gaussian kernel width by
        maximizing the test power. X, Y should not be the same data as used
        in the actual test (i.e., should be a held-out set).

        - max_iter: #gradient descent iterations
        - batch_proportion: (0,1] value to be multiplied with nx giving the batch
            size in stochastic gradient. 1 = full gradient ascent.
        - tol_fun: termination tolerance of the objective value

        Return (test_locs, gaussian_width, info)
        """
        J = n_test_locs
        """
        Optimize the empirical version of Lambda(T) i.e., the criterion used
        to optimize the test locations, for the test based
        on difference of mean embeddings with Gaussian kernel.
        Also optimize the Gaussian width.

        :return a theano function T |-> Lambda(T)
        """

        med = util.meddistance(tst_data.stack_xy(), 1000)
        T0 = MeanEmbeddingTest.init_locs_2randn(tst_data, n_test_locs,
                subsample=10000, seed=seed)
        #T0 = MeanEmbeddingTest.init_check_subset(tst_data, n_test_locs, med**2,
        #      n_cand=30, seed=seed+10)
        func_z = MeanEmbeddingTest.construct_z_theano
        # Use grid search to initialize the gwidth
        list_gwidth2 = np.hstack( ( (med**2) *(2.0**np.linspace(-3, 4, 30) ) ) )
        list_gwidth2.sort()
        besti, powers = MeanEmbeddingTest.grid_search_gwidth(tst_data, T0,
                list_gwidth2, alpha)
        gwidth0 = list_gwidth2[besti]
        assert util.is_real_num(gwidth0), 'gwidth0 not real. Was %s'%str(gwidth0)
        assert gwidth0 > 0, 'gwidth0 not positive. Was %.3g'%gwidth0

        # info = optimization info
        T, gamma, info = optimize_T_gaussian_width(tst_data, T0, gwidth0, func_z,
                max_iter=max_iter, T_step_size=locs_step_size,
                gwidth_step_size=gwidth_step_size, batch_proportion=batch_proportion,
                tol_fun=tol_fun, reg=reg)
        assert util.is_real_num(gamma), 'gamma is not real. Was %s' % str(gamma)

        ninfo = {'test_locs': info['Ts'], 'test_locs0': info['T0'],
                'gwidths': info['gwidths'], 'obj_values': info['obj_values'],
                'gwidth0': gwidth0, 'gwidth0_powers': powers}
        return (T, gamma, ninfo)
Example 25
Project: PyORBIT   Author: LucaMalavolta   File: common.py    MIT License
def nested_sampling_prior_prepare(kind, bounds, pams, space):
    """
    This subroutine computes the coefficient of the spline interpolation of the inverse cumulative function
    In some special cases, ruterns the parameters required by the intrinsic function, e.g. scipi.stats.norm.icf
    according to their implementation in nested_sampling_prior_compute()

    :param kind: type of prior
    :param bounds: list/array with lower and upper limits for parameter exploration
    :param pams: parameters relative to the prior probability function
    :return:
    """

    if kind == 'Uniform':
        return bounds

    if kind == 'Gaussian':
        return pams

    if kind == 'beta':
        return pams

    """ All the following priors are defined only if the variable is sampled in the Natural space"""
    if space != 'Linear':
        print()
        print(' *** ERROR in the YAML file ***')
        print(' You are using a prior that is not supported in a non-Linear sampling space.')
        print(' Add this keyword in the YAML file for each parameter that is not sampled in the')
        print(' Linear space and that has a prior other than Uniform or Gaussian:')
        print('   spaces: ')
        print('       pam: Linear')
        print()
        quit()

    x_var = np.linspace(0.000000, 1.000000, num=10001, endpoint=True, dtype=np.double)*(bounds[1]-bounds[0]) + bounds[0]
    area = np.zeros(len(x_var), dtype=np.double)

    for x_num, x_val in enumerate(x_var):
        area[x_num:] += np.exp(giveback_priors(kind, bounds, pams, x_val)) * (1. / 10000.) + 0.000000000001
    area[0] = 0
    area /= area[-1]

    return splrep(area, x_var) 
Example 26
Project: pylqr   Author: navigator8972   File: pylqr_trajctrl.py    GNU General Public License v3.0
def PyLQR_TrajCtrl_GeneralTest():
    #build RBF basis
    rbf_basis = np.array([
        [-1.0, -1.0],
        [-1.0, 1.0],
        [1.0, -1.0],
        [1.0, 1.0]
        ])
    gamma = 1
    T = 100
    R = 1e-5
    # rbf_funcs = [lambda x, u, t, aux: np.exp(-gamma*np.linalg.norm(x[0:2]-basis)**2) + .01*np.linalg.norm(u)**2 for basis in rbf_basis]
    rbf_funcs = [
    lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[0])**2) + R*np.linalg.norm(u)**2,
    lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[1])**2) + R*np.linalg.norm(u)**2,
    lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[2])**2) + R*np.linalg.norm(u)**2,
    lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[3])**2) + R*np.linalg.norm(u)**2
    ]

    weights = np.array([.75, .5, .25, 1.])
    weights = weights / (np.sum(weights) + 1e-6)

    cost_func = lambda x, u, t, aux: np.sum(weights * np.array([basis_func(x, u, t, aux) for basis_func in rbf_funcs]))

    lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
    lqr_traj_ctrl.build_ilqr_general_solver(cost_func, n_dims=rbf_basis.shape[1], T=T)

    n_eval_pnts = 50
    coords = np.linspace(-2.5, 2.5, n_eval_pnts)
    xv, yv = np.meshgrid(coords, coords)

    z = [[cost_func(np.array([xv[i, j], yv[i, j]]), np.zeros(2), None, None) for j in range(yv.shape[1])] for i in range(len(xv))]

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.hold(True)  # Axes.hold was removed in Matplotlib 3.0; figures now always "hold"
    ax.contour(xv, yv, z)
    
    n_queries = 5
    u_array = np.random.rand(2, T-1).T * 2 - 1
    
    for i in range(n_queries):
        #start from a perturbed point
        x0 = np.random.rand(2) * 4 - 2
        syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0, u_array)
        #plot it
        ax.plot([x0[0]], [x0[1]], 'k*', markersize=12.0)
        ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)

    plt.show()

    return 
Example 27
Project: kernel-gof   Author: wittawatj   File: ex3_vary_nlocs.py    MIT License
def job_fssdq_opt(p, data_source, tr, te, r, J, null_sim=None):
    """
    FSSD with optimization on tr. Test on te. Use a Gaussian kernel.
    """
    if null_sim is None:
        null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)

    Xtr = tr.data()
    with util.ContextTimer() as t:
        # Use grid search to initialize the gwidth
        n_gwidth_cand = 5
        gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
        med2 = util.meddistance(Xtr, 1000)**2

        k = kernel.KGauss(med2*2)
        # fit a Gaussian to the data and draw to initialize V0
        V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
        list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
        besti, objs = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
        assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
        logging.info('After grid search, gwidth=%.3g'%gwidth)
        
        ops = {
            'reg': 1e-2,
            'max_iter': 50,
            'tol_fun': 1e-4,
            'disp': True,
            'locs_bounds_frac': 10.0,
            'gwidth_lb': 1e-1,
            'gwidth_ub': 1e3,
            }

        V_opt, gwidth_opt, info = gof.GaussFSSD.optimize_locs_widths(p, tr,
                gwidth, V0, **ops) 
        # Use the optimized parameters to construct a test
        k_opt = kernel.KGauss(gwidth_opt)
        fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
        fssd_opt_result = fssd_opt.perform_test(te)
    return {'test_result': fssd_opt_result, 'time_secs': t.secs, 
            'goftest': fssd_opt, 'opt_info': info,
            } 
Example 28
Project: kernel-gof   Author: wittawatj   File: ex2_prob_params.py    MIT License
def job_fssdJ1q_opt(p, data_source, tr, te, r, J=1, null_sim=None):
    """
    FSSD with optimization on tr. Test on te. Use a Gaussian kernel.
    """
    if null_sim is None:
        null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)

    Xtr = tr.data()
    with util.ContextTimer() as t:
        # Use grid search to initialize the gwidth
        n_gwidth_cand = 5
        gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
        med2 = util.meddistance(Xtr, 1000)**2

        k = kernel.KGauss(med2*2)
        # fit a Gaussian to the data and draw to initialize V0
        V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
        list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
        besti, objs = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
        assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
        logging.info('After grid search, gwidth=%.3g'%gwidth)
        
        ops = {
            'reg': 1e-2,
            'max_iter': 40,
            'tol_fun': 1e-4,
            'disp': True,
            'locs_bounds_frac':10.0,
            'gwidth_lb': 1e-1,
            'gwidth_ub': 1e4,
            }

        V_opt, gwidth_opt, info = gof.GaussFSSD.optimize_locs_widths(p, tr,
                gwidth, V0, **ops) 
        # Use the optimized parameters to construct a test
        k_opt = kernel.KGauss(gwidth_opt)
        fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
        fssd_opt_result = fssd_opt.perform_test(te)
    return {'test_result': fssd_opt_result, 'time_secs': t.secs, 
            'goftest': fssd_opt, 'opt_info': info,
            } 
Example 29
Project: kernel-gof   Author: wittawatj   File: ex1_vary_n.py    MIT License
def job_fssdJ1q_opt(p, data_source, tr, te, r, J=1, null_sim=None):
    """
    FSSD with optimization on tr. Test on te. Use a Gaussian kernel.
    """
    if null_sim is None:
        null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)

    Xtr = tr.data()
    with util.ContextTimer() as t:
        # Use grid search to initialize the gwidth
        n_gwidth_cand = 5
        gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
        med2 = util.meddistance(Xtr, 1000)**2

        k = kernel.KGauss(med2)
        # fit a Gaussian to the data and draw to initialize V0
        V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
        list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
        besti, objs = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
        assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
        logging.info('After grid search, gwidth=%.3g'%gwidth)
        
        ops = {
            'reg': 1e-2,
            'max_iter': 30,
            'tol_fun': 1e-5,
            'disp': True,
            'locs_bounds_frac':30.0,
            'gwidth_lb': 1e-1,
            'gwidth_ub': 1e4,
            }

        V_opt, gwidth_opt, info = gof.GaussFSSD.optimize_locs_widths(p, tr,
                gwidth, V0, **ops) 
        # Use the optimized parameters to construct a test
        k_opt = kernel.KGauss(gwidth_opt)
        fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
        fssd_opt_result = fssd_opt.perform_test(te)
    return {'test_result': fssd_opt_result, 'time_secs': t.secs, 
            'goftest': fssd_opt, 'opt_info': info,
            } 
Example 30
Project: kernel-gof   Author: wittawatj   File: mmd.py    MIT License
def perform_test(self, dat, candidate_kernels=None, return_mmdtest=False,
            tr_proportion=0.2, reg=1e-3):
        """
        dat: an instance of Data
        candidate_kernels: a list of Kernel's to choose from
        tr_proportion: proportion of sample to be used to choosing the best
            kernel
        reg: regularization parameter for the test power criterion 
        """
        with util.ContextTimer() as t:
            seed = self.seed
            p = self.p
            ds = p.get_datasource()
            p_sample = ds.sample(dat.sample_size(), seed=seed+77)
            xtr, xte = p_sample.split_tr_te(tr_proportion=tr_proportion, seed=seed+18)
            # ytr, yte are of type data.Data
            ytr, yte = dat.split_tr_te(tr_proportion=tr_proportion, seed=seed+12)

            # training and test data
            tr_tst_data = fdata.TSTData(xtr.data(), ytr.data())
            te_tst_data = fdata.TSTData(xte.data(), yte.data())

            if candidate_kernels is None:
                # Assume a Gaussian kernel. Construct a list of 
                # kernels to try based on multiples of the median heuristic
                med = util.meddistance(tr_tst_data.stack_xy(), 1000)
                list_gwidth = np.hstack( ( (med**2) *(2.0**np.linspace(-4, 4, 10) ) ) )
                list_gwidth.sort()
                candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]

            alpha = self.alpha

            # grid search to choose the best Gaussian width
            besti, powers = tst.QuadMMDTest.grid_search_kernel(tr_tst_data,
                    candidate_kernels, alpha, reg=reg)
            # perform test 
            best_ker = candidate_kernels[besti]
            mmdtest = tst.QuadMMDTest(best_ker, self.n_permute, alpha=alpha)
            results = mmdtest.perform_test(te_tst_data)
            if return_mmdtest:
                results['mmdtest'] = mmdtest

        results['time_secs'] = t.secs
        return results 
Example 31
Project: paragami   Author: rgiordan   File: test_optimization_lib.py    Apache License 2.0
def test_transform_eigenspace(self):
        dim = 6
        a = np.random.random((dim, dim))
        a = 0.5 * (a.T + a) + np.eye(dim)
        eigvals, eigvecs = np.linalg.eigh(a)
        vec = np.random.random(dim)

        # Test basic transforms.
        a_mult = transform_eigenspace(eigvecs, eigvals, lambda x: x)
        assert_array_almost_equal(a @ vec, a_mult(vec))

        a_inv_mult = transform_eigenspace(eigvecs, eigvals, lambda x: 1 / x)
        assert_array_almost_equal(np.linalg.solve(a, vec), a_inv_mult(vec))

        a_sqrt_mult = transform_eigenspace(eigvecs, eigvals, np.sqrt)

        for i in range(dim):
            vec2 = np.zeros(dim)
            vec2[i] = 1
            assert_array_almost_equal(
                vec2.T @ a @ vec, a_sqrt_mult(vec2).T @ a_sqrt_mult(vec))

        # Test a transform with an incomplete eigenspace.
        trans_ind = 2
        a_trans = transform_eigenspace(
            eigvecs[:, 0:trans_ind], eigvals[0:trans_ind], lambda x: 10)
        for i in range(trans_ind):
            vec = eigvecs[:, i]
            assert_array_almost_equal(10 * vec, a_trans(vec))

        for i in range(trans_ind + 1, dim):
            vec = eigvecs[:, i]
            assert_array_almost_equal(vec, a_trans(vec))

        # Test eigenvalue truncation.
        eigvals = np.linspace(0, dim, dim) + 1
        a2 = eigvecs @ np.diag(eigvals) @ eigvecs.T
        ev_min = 2.5
        ev_max = 5.5
        a_trans = transform_eigenspace(
            eigvecs, eigvals,
            lambda x: truncate_eigenvalues(x, ev_min=ev_min, ev_max=ev_max))

        for i in range(dim):
            vec = eigvecs[:, i]
            if eigvals[i] <= ev_min:
                assert_array_almost_equal(ev_min * vec, a_trans(vec))
            elif eigvals[i] >= ev_max:
                assert_array_almost_equal(ev_max * vec, a_trans(vec))
            else:
                assert_array_almost_equal(a2 @ vec, a_trans(vec))