Python numpy.logspace() Examples

The following are 30 code examples of numpy.logspace(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the numpy module.
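numpy.logspace(start, stop, num) returns num samples spaced evenly on a logarithmic scale; start and stop are exponents of the base (10 by default), not the endpoint values themselves. A minimal sketch of the basic call before the project examples:

import numpy as np

# 10 values spaced evenly in log space between 10**-1 and 10**1
grid = np.logspace(-1, 1, num=10)

# equivalent to exponentiating a linearly spaced grid of exponents
assert np.allclose(grid, 10.0 ** np.linspace(-1, 1, num=10))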
Example #1
Source File: test_stability_selection.py    From stability-selection with BSD 3-Clause "New" or "Revised" License
def test_no_features():
    n, p, k = 100, 200, 0

    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)

    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])

    lambdas_grid = np.logspace(-1, 1, num=10)

    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)

    assert_almost_equal(selector.transform(X),
                        np.empty(0).reshape((X.shape[0], 0))) 
Example #2
Source File: frd_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def testMIMOSmooth(self):
        sys = StateSpace([[-0.5, 0.0], [0.0, -1.0]],
                         [[1.0, 0.0], [0.0, 1.0]],
                         [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
                         [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
        sys2 = np.matrix([[1, 0, 0], [0, 1, 0]]) * sys
        omega = np.logspace(-1, 2, 10)
        f1 = FRD(sys, omega, smooth=True)
        f2 = FRD(sys2, omega, smooth=True)
        np.testing.assert_array_almost_equal(
            (f1*f2).freqresp([0.1, 1.0, 10])[0],
            (sys*sys2).freqresp([0.1, 1.0, 10])[0])
        np.testing.assert_array_almost_equal(
            (f1*f2).freqresp([0.1, 1.0, 10])[1],
            (sys*sys2).freqresp([0.1, 1.0, 10])[1])
        np.testing.assert_array_almost_equal(
            (f1*f2).freqresp([0.1, 1.0, 10])[2],
            (sys*sys2).freqresp([0.1, 1.0, 10])[2]) 
Example #3
Source File: LSDMap_HillslopeMorphology.py    From LSDMappingTools with MIT License
def PlotEStarRStarTheoretical():
    """
    This makes the theoretical E* vs R* plot. It prints to the current open figure.
    SMM Note: This would be better if it used a supplied figure. Can the default be get_clf()?

    MDH

    """
    # Calculate analytical relationship
    EStar = np.logspace(-1,3,1000)
    RStar = CalculateRStar(EStar)

    # Plot with open figure
    plt.plot(EStar,RStar,'k--')


#-------------------------------------------------------------------------------#
# PLOTTING FUNCTIONS
#-------------------------------------------------------------------------------#
# SMM: Checked and working 13/06/2018 
Example #4
Source File: frd_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def test_size_mismatch(self):
        sys1 = FRD(ct.rss(2, 2, 2), np.logspace(-1, 1, 10))

        # Different number of inputs
        sys2 = FRD(ct.rss(3, 1, 2), np.logspace(-1, 1, 10))
        self.assertRaises(ValueError, FRD.__add__, sys1, sys2)

        # Different number of outputs
        sys2 = FRD(ct.rss(3, 2, 1), np.logspace(-1, 1, 10))
        self.assertRaises(ValueError, FRD.__add__, sys1, sys2)

        # Inputs and outputs don't match
        self.assertRaises(ValueError, FRD.__mul__, sys2, sys1)

        # Feedback mismatch
        self.assertRaises(ValueError, FRD.feedback, sys2, sys1) 
Example #5
Source File: stability_selection.py    From stability-selection with BSD 3-Clause "New" or "Revised" License
def __init__(self, base_estimator=LogisticRegression(penalty='l1'), lambda_name='C',
                 lambda_grid=np.logspace(-5, -2, 25), n_bootstrap_iterations=100,
                 sample_fraction=0.5, threshold=0.6, bootstrap_func=bootstrap_without_replacement,
                 bootstrap_threshold=None, verbose=0, n_jobs=1, pre_dispatch='2*n_jobs',
                 random_state=None):
        self.base_estimator = base_estimator
        self.lambda_name = lambda_name
        self.lambda_grid = lambda_grid
        self.n_bootstrap_iterations = n_bootstrap_iterations
        self.sample_fraction = sample_fraction
        self.threshold = threshold
        self.bootstrap_func = bootstrap_func
        self.bootstrap_threshold = bootstrap_threshold
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.pre_dispatch = pre_dispatch
        self.random_state = random_state 
Example #6
Source File: config_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def test_custom_bode_default(self):
        ct.config.defaults['bode.dB'] = True
        ct.config.defaults['bode.deg'] = True
        ct.config.defaults['bode.Hz'] = True

        # Generate a Bode plot
        plt.figure()
        omega = np.logspace(-3, 3, 100)
        ct.bode_plot(self.sys, omega, dB=True)
        mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
        np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)

        # Override defaults
        plt.figure()
        ct.bode_plot(self.sys, omega, Hz=True, deg=False, dB=True)
        mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
        phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
        np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
        np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
        np.testing.assert_almost_equal(phase_y[-1], -pi, decimal=2)

        ct.reset_defaults() 
Example #7
Source File: test_stability_selection.py    From stability-selection with BSD 3-Clause "New" or "Revised" License
def test_stability_selection_regression():
    n, p, k = 500, 1000, 5

    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)

    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])

    lambdas_grid = np.logspace(-1, 1, num=10)

    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)

    chosen_betas = selector.get_support(indices=True)

    assert_almost_equal(important_betas, chosen_betas) 
Example #8
Source File: test_stability_selection.py    From stability-selection with BSD 3-Clause "New" or "Revised" License
def test_with_complementary_pairs_bootstrap():
    n, p, k = 500, 1000, 5

    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)

    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])

    lambdas_grid = np.logspace(-1, 1, num=10)

    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid,
                                  bootstrap_func='complementary_pairs')
    selector.fit(X, y)

    chosen_betas = selector.get_support(indices=True)

    assert_almost_equal(important_betas, chosen_betas) 
Example #9
Source File: test_stability_selection.py    From stability-selection with BSD 3-Clause "New" or "Revised" License
def test_different_shape():
    n, p, k = 100, 200, 5

    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)

    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])

    lambdas_grid = np.logspace(-1, 1, num=10)

    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)
    selector.transform(X[:, :-2]) 
Example #10
Source File: bdalg_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def test_feedback_args(self):
        # Added 25 May 2019 to cover missing exception handling in feedback()
        # If first argument is not LTI or convertible, generate an exception
        args = ([1], self.sys2)
        self.assertRaises(TypeError, ctrl.feedback, *args)

        # If second argument is not LTI or convertible, generate an exception
        args = (self.sys1, np.array([1]))
        self.assertRaises(TypeError, ctrl.feedback, *args)

        # Convert first argument to FRD, if needed
        h = TransferFunction([1], [1, 2, 2])
        omega = np.logspace(-1, 2, 10)
        frd = ctrl.FRD(h, omega)
        sys = ctrl.feedback(1, frd)
        self.assertTrue(isinstance(sys, ctrl.FRD)) 
Example #11
Source File: margin_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def test_nocross(self):
        # what happens when no gain/phase crossover?
        s = TransferFunction([1, 0], [1])
        h1 = 1/(1+s)
        h2 = 3*(10+s)/(2+s)
        h3 = 0.01*(10-s)/(2+s)/(1+s)
        gm, pm, wm, wg, wp, ws = stability_margins(h1)
        assert_array_almost_equal(
            [gm, pm, wg, wp],
            [float('Inf'), float('Inf'), float('NaN'), float('NaN')]) 
        gm, pm, wm, wg, wp, ws = stability_margins(h2)
        self.assertEqual(pm, float('Inf'))
        gm, pm, wm, wg, wp, ws = stability_margins(h3)
        self.assertTrue(np.isnan(wp))
        omega = np.logspace(-2,2, 100)
        out1b = stability_margins(FRD(h1, omega))
        out2b = stability_margins(FRD(h2, omega))
        out3b = stability_margins(FRD(h3, omega)) 
Example #12
Source File: test_stability_selection.py    From stability-selection with BSD 3-Clause "New" or "Revised" License
def test_stability_plot():
    n, p, k = 500, 200, 5

    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)

    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])

    lambdas_grid = np.logspace(-1, 1, num=10)

    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)

    plot_stability_path(selector, threshold_highlight=0.5) 
Example #13
Source File: configure.py    From gmpe-smtk with GNU Affero General Public License v3.0
def _define_line_spacing(self, maximum_distance, spacing, as_log=False):
        """
        The user may wish to define the line spacing in either log or
        linear space
        """
        nvals = int(maximum_distance / spacing) + 1
        if as_log:
            spacings = np.logspace(-3., np.log10(maximum_distance), nvals)
            spacings[0] = 0.0
        else:
            spacings = np.linspace(0.0, maximum_distance, nvals)

        if spacings[-1] < (maximum_distance - 1.0E-7):
            spacings = np.hstack([spacings, maximum_distance])

        return spacings 
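A quick numeric illustration of the log branch above (the values are illustrative, not taken from the project): the first point is forced back to an exact zero so the spacing still starts at the origin.

import numpy as np

maximum_distance, spacing = 100.0, 25.0
nvals = int(maximum_distance / spacing) + 1                      # 5 points
spacings = np.logspace(-3.0, np.log10(maximum_distance), nvals)
spacings[0] = 0.0                                                # replace 10**-3 with an exact origin
# ≈ array([0., 0.0178, 0.3162, 5.6234, 100.])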
Example #14
Source File: frd_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def test_evalfr_deprecated(self):
        sys_tf = ct.tf([1], [1, 2, 1])
        frd_tf = FRD(sys_tf, np.logspace(-1, 1, 3))

        # Deprecated version of the call (should generate warning)
        import warnings
        with warnings.catch_warnings():
            # Make warnings generate an exception
            warnings.simplefilter('error')

            # Make sure that we get a pending deprecation warning
            self.assertRaises(PendingDeprecationWarning, frd_tf.evalfr, 1.) 
Example #15
Source File: parallel.py    From imitation with MIT License
def example_gail_easy():
    sacred_ex_name = "train_adversarial"
    run_name = "example-gail-easy"
    n_seeds = 1
    search_space = {
        "named_configs": tune.grid_search([[env] for env in EASY_ENVS]),
        "config_updates": {
            "init_trainer_kwargs": {
                "init_rl_kwargs": {
                    "learning_rate": tune.grid_search(np.logspace(3e-6, 1e-1, num=3)),
                    "nminibatches": tune.grid_search([16, 32, 64]),
                },
            },
        },
    }
    base_config_updates = {
        "init_tensorboard": True,
        "init_trainer_kwargs": {"use_gail": True},
    } 
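Note that logspace treats its first two arguments as exponents, so np.logspace(3e-6, 1e-1, num=3) in the search space above evaluates to three values between 1.0 and about 1.26 rather than to learning rates spanning 3e-6 to 1e-1. If that range was the intent (an assumption on our part, not something the project states), the usual idioms look like this:

import numpy as np

np.logspace(3e-6, 1e-1, num=3)                      # ≈ array([1.000007, 1.12202, 1.258925])
np.logspace(np.log10(3e-6), np.log10(1e-1), num=3)  # ≈ array([3.0e-06, 5.48e-04, 1.0e-01])
np.geomspace(3e-6, 1e-1, num=3)                     # same result as the previous line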
Example #16
Source File: test_lasso.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_warm_start():
    """Test Lasso path convergence."""
    X, y = build_dataset(
        n_samples=100, n_features=100, sparse_X=True)
    n_samples, n_features = X.shape
    alpha_max = np.max(np.abs(X.T.dot(y))) / n_samples
    n_alphas = 10
    alphas = alpha_max * np.logspace(0, -2, n_alphas)

    reg1 = Lasso(tol=1e-6, warm_start=True, p0=10)
    reg1.coef_ = np.zeros(n_features)

    for alpha in alphas:
        reg1.set_params(alpha=alpha)
        reg1.fit(X, y)
        # refitting with warm start should take less than 2 iters:
        reg1.fit(X, y)
        # hack because assert_array_less does strict comparison...
        np.testing.assert_array_less(reg1.n_iter_, 2.01) 
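The alpha grid here follows a common pattern for Lasso paths: multiplying np.logspace(0, -2, n_alphas) by alpha_max gives a geometrically decreasing regularization path from alpha_max down to alpha_max / 100. A small standalone check (alpha_max is an illustrative value; the test derives it from X and y):

import numpy as np

alpha_max = 0.8                                 # illustrative; computed from the data in the test
alphas = alpha_max * np.logspace(0, -2, 10)
assert np.isclose(alphas[0], alpha_max)         # path starts at alpha_max
assert np.isclose(alphas[-1], alpha_max / 100)  # and ends two decades lower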
Example #17
Source File: test_PlanetPopulation.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_dist_sma_radius(self):
        """
        Test that sma and radius values outside of the range have zero probability
        """
        
        for mod in self.allmods:
            if 'dist_sma_radius' in mod.__dict__:
                with RedirectStreams(stdout=self.dev_null):
                    pp = mod(**self.spec)
                
                a = np.logspace(np.log10(pp.arange[0].value/10.),np.log10(pp.arange[1].value*100),100)
                Rp = np.logspace(np.log10(pp.Rprange[0].value/10.),np.log10(pp.Rprange[1].value*100),100)
                
                aa, RR = np.meshgrid(a,Rp)
                
                fr = pp.dist_sma_radius(aa,RR)
                self.assertTrue(np.all(fr[aa < pp.arange[0].value] == 0),'dist_sma_radius low bound failed on sma for %s'%mod.__name__)
                self.assertTrue(np.all(fr[aa > pp.arange[1].value] == 0),'dist_sma_radius high bound failed on sma for %s'%mod.__name__)
                self.assertTrue(np.all(fr[RR < pp.Rprange[0].value] == 0),'dist_sma_radius low bound failed on radius for %s'%mod.__name__)
                self.assertTrue(np.all(fr[RR > pp.Rprange[1].value] == 0),'dist_sma_radius high bound failed on radius for %s'%mod.__name__)
                self.assertTrue(np.all(fr[(aa > pp.arange[0].value) & (aa < pp.arange[1].value) & (RR > pp.Rprange[0].value) & (RR < pp.Rprange[1].value)] > 0),'dist_sma_radius is improper pdf for %s'%mod.__name__) 
Example #18
Source File: test_PlanetPopulation.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_dist_mass(self):
        """
        Test that masses outside of the range have zero probability

        """

        for mod in self.allmods:
            if 'dist_mass' in mod.__dict__:
                with RedirectStreams(stdout=self.dev_null):
                    pp = mod(**self.spec)

                Mp = np.logspace(np.log10(pp.Mprange[0].value/10.),np.log10(pp.Mprange[1].value*100.),100) 

                fr = pp.dist_mass(Mp)
                self.assertTrue(np.all(fr[Mp < pp.Mprange[0].value] == 0),'dist_mass low bound failed for %s'%mod.__name__)
                self.assertTrue(np.all(fr[Mp > pp.Mprange[1].value] == 0),'dist_mass high bound failed for %s'%mod.__name__)
                self.assertTrue(np.all(fr[(Mp >= pp.Mprange[0].value) & (Mp <= pp.Mprange[1].value)] > 0)) 
Example #19
Source File: test_PlanetPopulation.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_dist_radius(self):
        """
        Test that radii outside of the range have zero probability

        """
        for mod in self.allmods:
            if 'dist_radius' in mod.__dict__:
                with RedirectStreams(stdout=self.dev_null):
                    pp = mod(**self.spec)

                Rp = np.logspace(np.log10(pp.Rprange[0].to('earthRad').value/10.),np.log10(pp.Rprange[1].to('earthRad').value*100.),100) 

                fr = pp.dist_radius(Rp)
                self.assertTrue(np.all(fr[Rp < pp.Rprange[0].to('earthRad').value] == 0),'dist_radius low bound failed for %s'%mod.__name__)
                self.assertTrue(np.all(fr[Rp > pp.Rprange[1].to('earthRad').value] == 0),'dist_radius high bound failed for %s'%mod.__name__)
                self.assertTrue(np.all(fr[(Rp >= pp.Rprange[0].to('earthRad').value) & (Rp <= pp.Rprange[1].to('earthRad').value)] > 0),'dist_radius generates zero probabilities within range for %s'%mod.__name__) 
Example #20
Source File: test_PlanetPopulation.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def test_dist_sma(self):
        """
        Test that smas outside of the range have zero probability

        """

        for mod in self.allmods:
            if 'dist_sma' in mod.__dict__:
                with RedirectStreams(stdout=self.dev_null):
                    pp = mod(**self.spec)

                a = np.logspace(np.log10(pp.arange[0].to('AU').value/10.),np.log10(pp.arange[1].to('AU').value*10.),100)

                fa = pp.dist_sma(a)
                self.assertTrue(np.all(fa[a < pp.arange[0].to('AU').value] == 0),'dist_sma low bound failed for %s'%mod.__name__)
                self.assertTrue(np.all(fa[a > pp.arange[1].to('AU').value] == 0),'dist_sma high bound failed for %s'%mod.__name__)
                self.assertTrue(np.all(fa[(a >= pp.arange[0].to('AU').value) & (a <= pp.arange[1].to('AU').value)] >= 0.),'dist_sma generates negative densities within range for %s'%mod.__name__) 
Example #21
Source File: test_lasso.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_celer_path(sparse_X, alphas, pb):
    """Test Lasso path convergence."""
    X, y = build_dataset(n_samples=30, n_features=50, sparse_X=sparse_X)
    if pb == "logreg":
        y = np.sign(y)
    n_samples = X.shape[0]
    if alphas is not None:
        alpha_max = np.max(np.abs(X.T.dot(y))) / n_samples
        n_alphas = 10
        alphas = alpha_max * np.logspace(0, -2, n_alphas)

    tol = 1e-6
    alphas, coefs, gaps, thetas, n_iters = celer_path(
        X, y, pb, alphas=alphas, tol=tol, return_thetas=True,
        verbose=1, return_n_iter=True)
    np.testing.assert_array_less(gaps, tol)
    # hack because array_less wants strict inequality
    np.testing.assert_array_less(0.99, n_iters) 
Example #22
Source File: test_io.py    From vnpy_crypto with MIT License
def test_from_float_hex(self):
        # IEEE doubles and floats only, otherwise the float32
        # conversion may fail.
        tgt = np.logspace(-10, 10, 5).astype(np.float32)
        tgt = np.hstack((tgt, -tgt)).astype(float)
        inp = '\n'.join(map(float.hex, tgt))
        c = TextIO()
        c.write(inp)
        for dt in [float, np.float32]:
            c.seek(0)
            res = np.loadtxt(c, dtype=dt)
            assert_equal(res, tgt, err_msg="%s" % dt) 
Example #23
Source File: breakdown_metric.py    From lingvo with Apache License 2.0
def _LogSpacedBinEdgesofPoints(self):
    p = self.params
    return np.logspace(
        np.log10(1.0), np.log10(p.metadata.MaximumNumberOfPoints()),
        p.metadata.NumberOfPointsBins() + 1) 
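Passing np.log10 of the data range as the exponents, as done here, yields bin edges that grow geometrically, which suits point counts spanning several orders of magnitude. A self-contained sketch with stand-in values (the params object and its metadata helpers belong to lingvo and are not reproduced):

import numpy as np

max_points, num_bins = 100000, 5               # stand-ins for the params metadata values
edges = np.logspace(np.log10(1.0), np.log10(max_points), num_bins + 1)
# array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05])
counts, _ = np.histogram([3, 30, 300, 3000, 30000], bins=edges)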
Example #24
Source File: randmat.py    From vampyre with MIT License
def rand_rot_invariant_mat(nz1,nz0,cond_num=10,is_complex=False):
    """
    Creates a rotationally invariant random matrix.
    
    A rotationally invariant matrix is of the form :math:`A=USV^*` where
    :math:`U` and :math:`V` are uniformly distributed on the unitaries 
    (for complex matrices) and orthogonal matrices (for real matrices).
    The singular values :math:`S=diag(s_1,\ldots,s_r)` are logarithmically
    spaced from :math:`1/cond_num` to 1.  The singular values are then 
    scaled to have an average magnitude squared of one.
    """
    
    # Generate a random Gaussian matrix
    if is_complex:
        A = np.random.randn(nz1,nz0) + 1j*np.random.randn(nz1,nz0)
    else:
        A = np.random.randn(nz1,nz0)
        
    # Take the SVD
    U,s,V = np.linalg.svd(A,full_matrices=0)

    # Reset the singular values    
    r = len(s)
    s = np.logspace(-np.log10(cond_num),0,r)
    s = s / np.sqrt(np.mean(s**2))
    
    # Rebuild the matrix
    A = (U*s[None,:]).dot(V)
        
    return A 
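The logspace call above is what sets the condition number: it places the singular values geometrically between 1/cond_num and 1 before rescaling. A brief standalone check of that step, with illustrative sizes:

import numpy as np

cond_num, r = 10, 5                          # illustrative values
s = np.logspace(-np.log10(cond_num), 0, r)   # geometric from 1/cond_num up to 1
s = s / np.sqrt(np.mean(s ** 2))             # rescale so the mean squared singular value is 1
assert np.isclose(s[-1] / s[0], cond_num)    # the ratio, i.e. the condition number, is preserved
assert np.isclose(np.mean(s ** 2), 1.0)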
Example #25
Source File: lr_finder.py    From signaltrain with GNU General Public License v3.0
def lrfind(model, dataloader, optimizer, calc_loss, start=1e-6, stop=4e-3, num_lrs=150, to_screen=False):
    """ Learning Rate finder.  See leslie howard, sylvian gugger & jeremy howard's work """
    print("Running LR Find:",end="",flush=True)

    lrs, losses = [], []
    lr_tries = np.logspace(np.log10(start), np.log10(stop), num_lrs)
    ind, count, repeat = 0, 0, 3
    for x, y, knobs in dataloader:
        count+=1
        if ind >= len(lr_tries):
            break
        lr_try = lr_tries[ind]
        if count % repeat ==0:  # repeat over this many data points per lr value
            ind+=1
            print(".",sep="",end="",flush=True)
        optimizer.param_groups[0]['lr'] = lr_try

        #x_cuda, y_cuda, knobs_cuda = datagen.new()
        x_cuda, y_cuda, knobs_cuda = x.to(device), y.to(device), knobs.to(device)
        x_hat, mag, mag_hat = model.forward(x_cuda, knobs_cuda)
        loss = calc_loss(x_hat.float() ,y_cuda.float(), mag.float())
        lrs.append(lr_try)
        losses.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        model.clip_grad_norm_()
        optimizer.step()

    plt.figure(1)
    plt.semilogx(lrs,losses)
    if to_screen:
        plt.show()
    else:
        outfile = 'lrfind.png'
        plt.savefig(outfile)
        plt.close(plt.gcf())
        print("\nLR Find finished. See "+outfile)
    return 
Example #26
Source File: ml.py    From info-flow-experiments with GNU General Public License v3.0
def train_and_test(X, y, splittype='timed', splitfrac=0.1, nfolds=10, 
        verbose=False):
    
    algos = {   
                'logit':{'C':np.logspace(-5.0, 15.0, num=21, base=2), 'penalty':['l2']},
#               'svc':{'C':np.logspace(-5.0, 15.0, num=21, base=2)} 
#               'kNN':{'k':np.arange(1,20,2), 'p':[1,2,3]}, 
#               'polySVM':{'C':np.logspace(-5.0, 15.0, num=21, base=2), 'degree':[1,2,3,4]},
#               'rbfSVM':{'C':np.logspace(-5.0, 15.0, num=21, base=2), 'gamma':np.logspace(-15.0, 3.0, num=19, base=2)},
#               'randlog':{'C':np.logspace(-5.0, 15.0, num=21, base=2)},
#               'tree':{'ne':np.arange(5,10,2)}
                
            }   
    X_train, y_train, X_test, y_test = split_data(X, y, splittype, splitfrac, verbose)
    if(verbose):
        print "Training Set size: ", len(y_train), "blocks"
        print "Testing Set size: ", len(y_test), "blocks"
    s = datetime.now()
    clf, CVscore = select_and_fit_classifier(nfolds, algos, X_train, y_train, splittype, splitfrac, verbose)
    e = datetime.now()
    if(verbose):
        print "---Time for selecting classifier: ", str(e-s)
    print "CVscore: ", CVscore
    print "Test accuracy: ", test_accuracy(clf, X_test, y_test)
    
    blockSize = X_test.shape[1]
    blocks = X_test.shape[0]
    ypred = np.array([[-1]*blockSize]*blocks)
    for i in range(0,blocks):
        ypred[i] = clf.predict(X_test[i])
    return clf, ypred, y_test 
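The base=2 variant used for the C grid above produces the 21 powers of two from 2**-5 to 2**15, a conventional coarse grid for SVM and logistic-regression regularization searches. A one-line check:

import numpy as np

C_grid = np.logspace(-5.0, 15.0, num=21, base=2)
assert np.allclose(C_grid, 2.0 ** np.arange(-5, 16))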
Example #27
Source File: plot_hillslope_morphology.py    From LSDMappingTools with MIT License
def PlotEStarRStarTheoretical():
    """
    MDH
    
    """    
    # Calculate analytical relationship
    EStar = np.logspace(-1,3,1000)
    RStar = CalculateRStar(EStar)
    
    # Plot with open figure
    plt.plot(EStar,RStar,'k--') 
Example #28
Source File: benchmark_cnn.py    From benchmarks with Apache License 2.0
def get_perf_timing(batch_size, step_train_times, ewma_alpha=None, scale=1):
  """Calculate benchmark processing speed."""
  times = np.array(step_train_times)
  speeds = batch_size / times
  if ewma_alpha:
    weights = np.logspace(len(times)-1, 0, len(times), base=1-ewma_alpha)
    time_mean = np.average(times, weights=weights)
  else:
    time_mean = np.mean(times)
  speed_mean = scale * batch_size / time_mean
  speed_uncertainty = np.std(speeds) / np.sqrt(float(len(speeds)))
  speed_jitter = 1.4826 * np.median(np.abs(speeds - np.median(speeds)))
  return speed_mean, speed_uncertainty, speed_jitter 
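Here logspace builds exponentially decaying averaging weights: with base=1-ewma_alpha and exponents running from len(times)-1 down to 0, the most recent step gets weight 1 and older steps are discounted geometrically. A short check with illustrative numbers:

import numpy as np

ewma_alpha, n = 0.5, 4                                    # illustrative values
weights = np.logspace(n - 1, 0, n, base=1 - ewma_alpha)   # array([0.125, 0.25, 0.5, 1.0])
assert np.allclose(weights, (1 - ewma_alpha) ** np.arange(n - 1, -1, -1))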
Example #29
Source File: test_io.py    From recruit with Apache License 2.0
def test_from_float_hex(self):
        # IEEE doubles and floats only, otherwise the float32
        # conversion may fail.
        tgt = np.logspace(-10, 10, 5).astype(np.float32)
        tgt = np.hstack((tgt, -tgt)).astype(float)
        inp = '\n'.join(map(float.hex, tgt))
        c = TextIO()
        c.write(inp)
        for dt in [float, np.float32]:
            c.seek(0)
            res = np.loadtxt(c, dtype=dt)
            assert_equal(res, tgt, err_msg="%s" % dt) 
Example #30
Source File: proxy_a_distance.py    From transferlearning with MIT License
def proxy_a_distance(source_X, target_X, verbose=False):
    """
    Compute the Proxy-A-Distance of a source/target representation
    """
    nb_source = np.shape(source_X)[0]
    nb_target = np.shape(target_X)[0]

    if verbose:
        print('PAD on', (nb_source, nb_target), 'examples')

    C_list = np.logspace(-5, 4, 10)

    half_source, half_target = int(nb_source/2), int(nb_target/2)
    train_X = np.vstack((source_X[0:half_source, :], target_X[0:half_target, :]))
    train_Y = np.hstack((np.zeros(half_source, dtype=int), np.ones(half_target, dtype=int)))

    test_X = np.vstack((source_X[half_source:, :], target_X[half_target:, :]))
    test_Y = np.hstack((np.zeros(nb_source - half_source, dtype=int), np.ones(nb_target - half_target, dtype=int)))

    best_risk = 1.0
    for C in C_list:
        clf = svm.SVC(C=C, kernel='linear', verbose=False)
        clf.fit(train_X, train_Y)

        train_risk = np.mean(clf.predict(train_X) != train_Y)
        test_risk = np.mean(clf.predict(test_X) != test_Y)

        if verbose:
            print('[ PAD C = %f ] train risk: %f  test risk: %f' % (C, train_risk, test_risk))

        if test_risk > .5:
            test_risk = 1. - test_risk

        best_risk = min(best_risk, test_risk)

    return 2 * (1. - 2 * best_risk)