Python autograd.numpy.linspace() Examples

The following are 30 code examples of autograd.numpy.linspace(), collected from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
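Before diving into the examples, a quick note on why this function exists at all: autograd.numpy.linspace() mirrors numpy.linspace() but is differentiable with respect to its start and stop arguments, so gradients can flow through grids built inside a loss. A minimal sketch (the loss function here is illustrative, not taken from any project below):

import autograd.numpy as np
from autograd import grad

def loss(stop):
    # Sum a smooth function over a grid whose endpoint is the differentiable input.
    xs = np.linspace(0.0, stop, 50)
    return np.sum(np.sin(xs))

print(grad(loss)(2.0))  # derivative of the summed grid values w.r.t. the endpoint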
Example #1
Source File: bayesian_neural_net.py    From autograd with MIT License
def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))

        # Sample functions from posterior.
        rs = npr.RandomState(0)
        mean, log_std = unpack_params(params)
        sample_weights = rs.randn(10, num_weights) * np.exp(log_std) + mean
        plot_inputs = np.linspace(-8, 8, num=400)
        outputs = predictions(sample_weights, np.expand_dims(plot_inputs, 1))

        # Plot data and functions.
        plt.cla()
        ax.plot(inputs.ravel(), targets.ravel(), 'bx')
        ax.plot(plot_inputs, outputs[:, :, 0].T)
        ax.set_ylim([-2, 3])
        plt.draw()
        plt.pause(1.0/60.0)

    # Initialize variational parameters 
Example #2
Source File: deep_gaussian_process.py    From autograd with MIT License
def callback(params):
        print("Log marginal likelihood {}".format(log_marginal_likelihood(params)))

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(-5, 5, 300), (300,1))
        pred_mean, pred_cov = combined_predict_fun(params, X, y, plot_xs)
        plot_gp(ax_end_to_end, X, y, pred_mean, pred_cov, plot_xs)
        ax_end_to_end.set_title("X to y")

        layer1_params, layer2_params, hiddens = unpack_all_params(params)
        h_star_mean, h_star_cov = predict_layer_funcs[0](layer1_params, X, hiddens, plot_xs)
        y_star_mean, y_star_cov = predict_layer_funcs[1](layer2_params, np.atleast_2d(hiddens).T, y, plot_xs)

        plot_gp(ax_x_to_h, X, hiddens,                  h_star_mean, h_star_cov, plot_xs)
        ax_x_to_h.set_title("X to hiddens")

        plot_gp(ax_h_to_y, np.atleast_2d(hiddens).T, y, y_star_mean, y_star_cov, plot_xs)
        ax_h_to_y.set_title("hiddens to y")

        plt.draw()
        plt.pause(1.0/60.0)

    # Initialize covariance parameters and hiddens. 
Example #3
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_points=100, flatten=True):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pf = []

        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pf.append(anp.array([x1, x2]).T)

        if not flatten:
            pf = anp.concatenate([p[None, ...] for p in pf])
        else:
            pf = anp.row_stack(pf)

        return pf 
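The five [x1_min, x1_max] intervals above are the disconnected segments of the ZDT3 Pareto front, with x2 = 1 - sqrt(x1) - x1*sin(10*pi*x1) on each segment. A standalone sketch to visualize them (matplotlib assumed; not part of the pymoo snippet):

import autograd.numpy as anp
import matplotlib.pyplot as plt

regions = [[0, 0.0830015349], [0.182228780, 0.2577623634],
           [0.4093136748, 0.4538821041], [0.6183967944, 0.6525117038],
           [0.8233317983, 0.8518328654]]
for lo, hi in regions:
    x1 = anp.linspace(lo, hi, 20)
    plt.plot(x1, 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1))
plt.show()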
Example #4
Source File: kafnets.py    From kernel-activation-functions with MIT License
def init_kaf_nn(layer_sizes, scale=0.01, rs=np.random.RandomState(0), dict_size=20, boundary=3.0):
    """ 
    Initialize the parameters of a KAF feedforward network.
        - dict_size: the size of the dictionary for every neuron.
        - boundary: the boundary for the activation functions.
    """
    
    # Initialize the dictionary
    D = np.linspace(-boundary, boundary, dict_size).reshape(-1, 1)
    
    # Rule of thumb for gamma
    interval = D[1, 0] - D[0, 0]
    gamma = 0.5/np.square(2*interval)
    D = D.reshape(1, 1, -1)
    
    # Initialize a list of parameters for the layer
    w = [(rs.randn(insize, outsize) * scale,                # Weight matrix
                     rs.randn(outsize) * scale,             # Bias vector
                     rs.randn(1, outsize, dict_size) * 0.5) # Mixing coefficients
                     for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]
    
    return w, (D, gamma) 
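A minimal usage sketch, assuming only the function above (the layer sizes are illustrative):

w, (D, gamma) = init_kaf_nn([2, 10, 1], dict_size=20, boundary=3.0)
print(len(w))    # 2: one (weights, biases, mixing coefficients) tuple per layer
print(D.shape)   # (1, 1, 20): the dictionary, reshaped to broadcast over batch and units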
Example #5
Source File: run_synthetic_example.py    From ParetoMTL with MIT License
def create_pf():
    ps = np.linspace(-1 / np.sqrt(2), 1 / np.sqrt(2))  # num defaults to 50 points
    pf = []
    
    for x1 in ps:
        #generate solutions on the Pareto front:
        x = np.array([x1,x1])
        
        f, f_dx = concave_fun_eval(x)
        pf.append(f)
            
    pf = np.array(pf)
    
    return pf




### optimization method ### 
Example #6
Source File: methods.py    From tf-quant-finance with Apache License 2.0
def _build_errors_df(name_errors, label):
  """Helper to build errors DataFrame."""
  series = []
  percentiles = np.linspace(0, 100, 21)
  index = percentiles / 100
  for name, errors in name_errors:
    series.append(pd.Series(
        np.nanpercentile(errors, q=percentiles), index=index, name=name))
  df = pd.concat(series, axis=1)
  df.columns.name = 'derivative'
  df.index.name = 'quantile'
  df = df.stack().rename('error').reset_index()
  with np.errstate(divide='ignore'):
    df['log(error)'] = np.log(df['error'])
  if label is not None:
    df['label'] = label
  return df 
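A hypothetical usage sketch with synthetic error arrays (the names and magnitudes here are made up for illustration):

rng = np.random.RandomState(0)
name_errors = [('forward', np.abs(rng.randn(1000)) * 1e-6),
               ('central', np.abs(rng.randn(1000)) * 1e-9)]
df = _build_errors_df(name_errors, label='toy')
print(df.head())  # long-format rows: quantile, derivative, error, log(error), label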
Example #7
Source File: plot.py    From kernel-gof with MIT License
def box_meshgrid(func, xbound, ybound, nx=50, ny=50):
    """
    Form a meshed grid (to be used with a contour plot) on a box
    specified by xbound, ybound. Evaluate the grid with [func]: (n x 2) -> n.
    
    - xbound: a tuple (xmin, xmax)
    - ybound: a tuple (ymin, ymax)
    - nx: number of points to evaluate in the x direction
    - ny: number of points to evaluate in the y direction

    return XX, YY, ZZ where each is a 2D nd-array of shape (ny, nx)
    """
    
    # form a test location grid to try 
    minx, maxx = xbound
    miny, maxy = ybound
    loc0_cands = np.linspace(minx, maxx, nx)
    loc1_cands = np.linspace(miny, maxy, ny)
    lloc0, lloc1 = np.meshgrid(loc0_cands, loc1_cands)
    # nd1 x nd0 x 2
    loc3d = np.dstack((lloc0, lloc1))
    # #candidates x 2
    all_loc2s = np.reshape(loc3d, (-1, 2) )
    # evaluate the function
    func_grid = func(all_loc2s)
    func_grid = np.reshape(func_grid, (ny, nx))
    
    assert lloc0.shape[0] == ny
    assert lloc0.shape[1] == nx
    assert np.all(lloc0.shape == lloc1.shape)
    
    return lloc0, lloc1, func_grid 
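A hypothetical usage with a simple quadratic, fed straight into a contour plot (matplotlib assumed):

import matplotlib.pyplot as plt

XX, YY, ZZ = box_meshgrid(lambda X: np.sum(X**2, axis=1), (-2, 2), (-1, 1), nx=60, ny=40)
plt.contourf(XX, YY, ZZ)
plt.show()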
Example #8
Source File: neural_net_regression.py    From autograd with MIT License
def callback(params, t, g):
        print("Iteration {} log likelihood {}".format(t, -objective(params, t)))

        # Plot data and functions.
        plt.cla()
        ax.plot(inputs.ravel(), targets.ravel(), 'bx', ms=12)
        plot_inputs = np.reshape(np.linspace(-7, 7, num=300), (300,1))
        outputs = nn_predict(params, plot_inputs)
        ax.plot(plot_inputs, outputs, 'r', lw=3)
        ax.set_ylim([-1, 1])
        plt.draw()
        plt.pause(1.0/60.0) 
Example #9
Source File: test_scipy.py    From autograd with MIT License
def test_odeint():
        combo_check(integrate.odeint, [1,2,3])([func], [R(3)], [np.linspace(0.1, 0.2, 4)],
                                                 [(R(3), R(3))])

    ## Linalg 
Example #10
Source File: test_numpy.py    From autograd with MIT License
def test_linspace():
    for num in [0, 1, 5]:
        def fun(x, y): return np.linspace(x, y, num)

        check_grads(fun)(1.2, 3.4)
        check_grads(fun)(1.2, -3.4)
        check_grads(fun)(1.2, 1.2) 
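For context, check_grads lives in autograd.test_util and compares forward- and reverse-mode gradients against numerical differences, raising on disagreement. A standalone sketch of the same check as above:

import autograd.numpy as np
from autograd.test_util import check_grads

check_grads(lambda x, y: np.linspace(x, y, 5))(1.2, 3.4)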
Example #11
Source File: run_synthetic_example.py    From ParetoMTL with MIT License
def circle_points(r, n):
    # generate evenly distributed preference vector
    circles = []
    for r, n in zip(r, n):
        t = np.linspace(0, 0.5 * np.pi, n)
        x = r * np.cos(t)
        y = r * np.sin(t)
        circles.append(np.c_[x, y])
    return circles
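A hypothetical call, not part of the original file: one radius, five rays on the unit quarter-circle.

rays = circle_points([1.0], [5])
print(rays[0].shape)  # (5, 2): points (cos t, sin t) for t evenly spaced in [0, pi/2]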


### the synthetic multi-objective problem ### 
Example #12
Source File: ex2_prob_params.py    From kernel-gof with MIT License
def job_mmd_opt(p, data_source, tr, te, r):
    """
    MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
    Requires the ability to sample from p, i.e., the UnnormalizedDensity p
    has to return a non-None value from get_datasource().

    With optimization. Gaussian kernel.
    """
    data = tr + te
    X = data.data()
    with util.ContextTimer() as t:
        # median heuristic 
        pds = p.get_datasource()
        datY = pds.sample(data.sample_size(), seed=r+294)
        Y = datY.data()
        XY = np.vstack((X, Y))

        med = util.meddistance(XY, subsample=1000)

        # Construct a list of kernels to try based on multiples of the median
        # heuristic
        #list_gwidth = np.hstack( (np.linspace(20, 40, 10), (med**2)
        #    *(2.0**np.linspace(-2, 2, 20) ) ) )
        list_gwidth = (med**2)*(2.0**np.linspace(-3, 3, 30) ) 
        list_gwidth.sort()
        candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]

        mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r+56)
        mmd_result = mmd_opt.perform_test(data,
                candidate_kernels=candidate_kernels,
                tr_proportion=tr_proportion, reg=1e-3)
    return { 'test_result': mmd_result, 'time_secs': t.secs}


# Define our custom Job, which inherits from base class IndependentJob 
Example #13
Source File: ex1_vary_n.py    From kernel-gof with MIT License
def job_mmd_opt(p, data_source, tr, te, r):
    """
    MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
    Requires the ability to sample from p, i.e., the UnnormalizedDensity p
    has to return a non-None value from get_datasource().

    With optimization. Gaussian kernel.
    """
    data = tr + te
    X = data.data()
    with util.ContextTimer() as t:
        # median heuristic 
        pds = p.get_datasource()
        datY = pds.sample(data.sample_size(), seed=r+294)
        Y = datY.data()
        XY = np.vstack((X, Y))

        med = util.meddistance(XY, subsample=1000)

        # Construct a list of kernels to try based on multiples of the median
        # heuristic
        #list_gwidth = np.hstack( (np.linspace(20, 40, 10), (med**2)
        #    *(2.0**np.linspace(-2, 2, 20) ) ) )
        list_gwidth = (med**2)*(2.0**np.linspace(-4, 4, 30) ) 
        list_gwidth.sort()
        candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]

        mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r)
        mmd_result = mmd_opt.perform_test(data,
                candidate_kernels=candidate_kernels,
                tr_proportion=tr_proportion, reg=1e-3)
    return { 'test_result': mmd_result, 'time_secs': t.secs} 
Example #14
Source File: neural_net_regression.py    From autograd with MIT License
def build_toy_dataset(n_data=80, noise_std=0.1):
    rs = npr.RandomState(0)
    inputs  = np.concatenate([np.linspace(0, 3, num=n_data // 2),
                              np.linspace(6, 8, num=n_data // 2)])
    targets = np.cos(inputs) + rs.randn(n_data) * noise_std
    inputs = (inputs - 4.0) / 2.0
    inputs  = inputs[:, np.newaxis]
    targets = targets[:, np.newaxis] / 2.0
    return inputs, targets 
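A quick usage sketch: the two linspace segments deliberately leave a gap in the inputs, which makes the model's uncertainty between the clusters easy to see when plotted.

inputs, targets = build_toy_dataset(n_data=80)
print(inputs.shape, targets.shape)  # (80, 1) (80, 1)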
Example #15
Source File: goftest.py    From kernel-gof with MIT License
def optimize_auto_init(p, dat, J, **ops):
        """
        Optimize parameters by calling optimize_locs_widths(). Automatically 
        initialize the test locations and the Gaussian width.

        Return optimized locations, Gaussian width, optimization info
        """
        assert J>0
        # Use grid search to initialize the gwidth
        X = dat.data()
        n_gwidth_cand = 5
        gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
        med2 = util.meddistance(X, 1000)**2

        k = kernel.KGauss(med2*2)
        # fit a Gaussian to the data and draw to initialize V0
        V0 = util.fit_gaussian_draw(X, J, seed=829, reg=1e-6)
        list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
        besti, objs = GaussFSSD.grid_search_gwidth(p, dat, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
        assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
        logging.info('After grid search, gwidth=%.3g'%gwidth)

        
        V_opt, gwidth_opt, info = GaussFSSD.optimize_locs_widths(p, dat,
                gwidth, V0, **ops) 

        # set the width bounds
        #fac_min = 5e-2
        #fac_max = 5e3
        #gwidth_lb = fac_min*med2
        #gwidth_ub = fac_max*med2
        #gwidth_opt = max(gwidth_lb, min(gwidth_opt, gwidth_ub))
        return V_opt, gwidth_opt, info 
Example #16
Source File: goftest.py    From kernel-gof with MIT License
def power_criterion(p, dat, b, c, test_locs, reg=1e-2):
        k = kernel.KIMQ(b=b, c=c)
        return FSSD.power_criterion(p, dat, k, test_locs, reg)

Example #17
Source File: bnh.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x1 = anp.linspace(0, 5, n_pareto_points)
        x2 = anp.copy(x1)
        x2[x1 >= 3] = 3
        return anp.vstack((4 * anp.square(x1) + 4 * anp.square(x2), anp.square(x1 - 5) + anp.square(x2 - 5))).T 
Example #18
Source File: zdt.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = anp.linspace(0, 1, n_pareto_points)
        return anp.array([x, 1 - anp.sqrt(x)]).T 
Example #19
Source File: zdt.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = anp.linspace(0, 1, n_pareto_points)
        return anp.array([x, 1 - anp.power(x, 2)]).T 
Example #20
Source File: zdt.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pareto_front = anp.array([]).reshape((-1, 2))
        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_pareto_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pareto_front = anp.concatenate((pareto_front, anp.array([x1, x2]).T), axis=0)
        return pareto_front 
Example #21
Source File: zdt.py    From pymop with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = anp.linspace(0, 1, n_pareto_points)
        return anp.array([x, 1 - anp.sqrt(x)]).T 
Example #22
Source File: ica.py    From autograd with MIT License
def color_scatter(ax, xs, ys):
    colors = cm.rainbow(np.linspace(0, 1, len(ys)))
    for x, y, c in zip(xs, ys, colors):
        ax.scatter(x, y, color=c) 
Example #23
Source File: bnh.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_points=100):
        x1 = anp.linspace(0, 5, n_points)
        x2 = anp.linspace(0, 5, n_points)
        x2[x1 >= 3] = 3

        X = anp.column_stack([x1, x2])
        return self.evaluate(X, return_values_of=["F"]) 
Example #24
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = anp.linspace(0, 1, n_pareto_points)
        return anp.array([x, 1 - anp.sqrt(x)]).T 
Example #25
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = anp.linspace(0, 1, n_pareto_points)
        return anp.array([x, 1 - anp.power(x, 2)]).T 
Example #26
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = 1 + anp.linspace(0, 1, n_pareto_points) * 30
        pf = anp.column_stack([x, (self.m-1) / x])
        if self.normalize:
            pf = normalize(pf)
        return pf 
Example #27
Source File: zdt.py    From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_pareto_points=100):
        x = anp.linspace(0.2807753191, 1, n_pareto_points)
        return anp.array([x, 1 - anp.power(x, 2)]).T 
Example #28
Source File: geometry.py    From AeroSandbox with MIT License
def cosspace(min=0, max=1, n_points=50):
    mean = (max + min) / 2
    amp = (max - min) / 2

    return mean + amp * np.cos(np.linspace(np.pi, 0, n_points)) 
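Cosine spacing clusters samples near both endpoints rather than spreading them uniformly, which is the usual motivation in airfoil discretization. A quick check (values rounded):

pts = cosspace(0.0, 1.0, n_points=5)
print(pts)  # [0. 0.1464 0.5 0.8536 1.] -- denser near the ends than a uniform linspace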
Example #29
Source File: geometry.py    From AeroSandbox with MIT License
def linspace_3D(start, stop, n_points):
    # Given two points (a start and a stop), returns an interpolated array of points on the line between them.
    # Inputs:
    #   * start: 3D coordinates expressed as a 1D numpy array, shape==(3,).
    #   * stop: 3D coordinates expressed as a 1D numpy array, shape==(3,).
    #   * n_points: number of points to be interpolated (including endpoints), a scalar.
    # Outputs:
    #   * points: array of 3D coordinates expressed as a 2D numpy array, shape==(n_points, 3)
    x = np.linspace(start[0], stop[0], n_points)
    y = np.linspace(start[1], stop[1], n_points)
    z = np.linspace(start[2], stop[2], n_points)

    points = np.column_stack((x, y, z))
    return points 
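A hypothetical call interpolating three points from the origin to (1, 2, 3):

pts = linspace_3D(np.array([0.0, 0.0, 0.0]), np.array([1.0, 2.0, 3.0]), 3)
print(pts)  # [[0.  0.  0. ] [0.5 1.  1.5] [1.  2.  3. ]]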
Example #30
Source File: methods.py    From tf-quant-finance with Apache License 2.0
def grid(num, ndim, large=False):
  """Build a uniform grid with num points along each of ndim axes."""
  if not large:
    _check_not_too_large(np.power(num, ndim) * ndim)
  x = np.linspace(0, 1, num, dtype='float64')
  w = 1 / (num - 1)
  points = np.stack(
      np.meshgrid(*[x for _ in range(ndim)], indexing='ij'), axis=-1)
  return points, w
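A hypothetical call; passing large=True sidesteps _check_not_too_large, which is defined elsewhere in the same source file:

points, w = grid(num=3, ndim=2, large=True)
print(points.shape)  # (3, 3, 2): a 3x3 lattice of 2D coordinates
print(w)             # 0.5: spacing between neighboring grid points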