Python numpy.linspace() Examples

The following code examples show how to use numpy.linspace(). They are taken from open-source Python projects.
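
Before the project examples, here is a minimal sketch of the call that all of them rely on: numpy.linspace(start, stop, num) returns num evenly spaced samples from start to stop, with optional endpoint and retstep arguments.

import numpy as np

print(np.linspace(0.0, 1.0, num=5))                  # [0.   0.25 0.5  0.75 1.  ]  (endpoint included by default)
print(np.linspace(0.0, 1.0, num=4, endpoint=False))  # [0.   0.25 0.5  0.75]       (stop value dropped)

samples, step = np.linspace(0.0, 10.0, num=11, retstep=True)
print(step)                                          # 1.0 -- retstep=True also returns the spacing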

Example 1
Project: b2ac   Author: hbldh   File: ellipse.py    MIT License
def polygonize(self, n=73):
        """Gets a approximate polygon array representing the ellipse.

        Note that the last point is the same as the first point, creating a closed
        polygon.

        :param n: The number of points to generate. Default is 73 (one vertex every 5 degrees).
        :type n: int
        :return: An [n x 2] numpy array, describing the boundary vertices of
                 the polygonized ellipse.
        :rtype: :py:class:`numpy.ndarray`

        """
        t = np.linspace(0, 2 * np.pi, num=n, endpoint=True)
        out = np.zeros((len(t), 2), dtype='float')
        out[:, 0] = (self.center_point[0] +
                     self.radii[0] * np.cos(t) * np.cos(self.rotation_angle) -
                     self.radii[1] * np.sin(t) * np.sin(self.rotation_angle))
        out[:, 1] = (self.center_point[1] +
                     self.radii[0] * np.cos(t) * np.sin(self.rotation_angle) +
                     self.radii[1] * np.sin(t) * np.cos(self.rotation_angle))
        return out 
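
For context, the same parametric construction can be sketched outside the class: because t runs from 0 to 2*pi with endpoint=True, the first and last vertices coincide and the polygon is closed. The center, semi-axes, and rotation angle below are illustrative values, not taken from the b2ac project.

import numpy as np

cx, cy = 1.0, -2.0          # illustrative center
a, b = 3.0, 1.5             # illustrative semi-axes
phi = np.deg2rad(30.0)      # illustrative rotation angle

t = np.linspace(0, 2 * np.pi, num=73, endpoint=True)
xs = cx + a * np.cos(t) * np.cos(phi) - b * np.sin(t) * np.sin(phi)
ys = cy + a * np.cos(t) * np.sin(phi) + b * np.sin(t) * np.cos(phi)

# the first and last vertices are identical, so the boundary closes on itself
assert np.allclose([xs[0], ys[0]], [xs[-1], ys[-1]])
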
Example 2
Project: spacesense   Author: spacesense-ai   File: utils.py    GNU Lesser General Public License v3.0
def optimize_OneClassSVM(X, n):
    print('searching for optimal hyperparameters...')
    nu = np.linspace(start=1e-5, stop=1e-2, num=n)
    gamma = np.linspace(start=1e-6, stop=1e-3, num=n)
    opt_diff = 1.0
    opt_nu = None
    opt_gamma = None
    for i in range(len(nu)):
        for j in range(len(gamma)):
            classifier = svm.OneClassSVM(kernel="rbf", nu=nu[i], gamma=gamma[j])
            classifier.fit(X)
            label = classifier.predict(X)
            p = 1 - float(sum(label == 1.0)) / len(label)
            diff = math.fabs(p - nu[i])
            if diff < opt_diff:
                opt_diff = diff
                opt_nu = nu[i]
                opt_gamma = gamma[j]
    return opt_nu, opt_gamma 
Example 3
Project: FRIDA   Author: LCAV   File: doa.py    MIT License
def compute_mode(self):
        """
        Pre-compute mode vectors from candidate locations (in spherical 
        coordinates).
        """
        if self.num_loc is None:
            raise ValueError('Lookup table appears to be empty. \
                Run build_lookup().')
        self.mode_vec = np.zeros((self.max_bin,self.M,self.num_loc), 
            dtype='complex64')
        if (self.nfft % 2 == 1):
            raise ValueError('Signal length must be even.')
        f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
            * 1j * 2 * np.pi
        for i in range(self.num_loc):
            p_s = self.loc[:, i]
            for m in range(self.M):
                p_m = self.L[:, m]
                if (self.mode == 'near'):
                    dist = np.linalg.norm(p_m - p_s, axis=1)
                if (self.mode == 'far'):
                    dist = np.dot(p_s, p_m)
                # tau = np.round(self.fs*dist/self.c) # discrete - jagged
                tau = self.fs * dist / self.c  # "continuous" - smoother
                self.mode_vec[:, m, i] = np.exp(f * tau) 
Example 4
Project: xrft   Author: xgcm   File: xrft.py    MIT License
def _azimuthal_wvnum(k, l, N, nfactor):
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    kidx = np.digitize(np.ravel(K), ki)
    area = np.bincount(kidx)

    kr = np.bincount(kidx, weights=K.ravel()) / area

    return kidx, area, kr 
Example 5
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_cross_phase_2d(self, dask):
        Ny, Nx = (32, 16)
        x = np.linspace(0, 1, num=Nx, endpoint=False)
        y = np.ones(Ny)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1*y[:,np.newaxis], name='a',
                          dims=['y','x'], coords={'y':y, 'x':x})
        da2 = xr.DataArray(data=signal2*y[:,np.newaxis], name='b',
                          dims=['y','x'], coords={'y':y, 'x':x})
        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2, dim=['y','x'])

        if dask:
            da1 = da1.chunk({'x': 16})
            da2 = da2.chunk({'x': 16})
        cp = xrft.cross_phase(da1, da2, dim=['x'])
        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset) 
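
A quick aside on the endpoint=False used in these signal tests: sampling one period of a cosine with the endpoint included would duplicate the first sample and smear the spectrum. A small sketch of the difference, in plain NumPy and not part of the xrft test suite:

import numpy as np

N = 16
x = np.linspace(0, 1, num=N, endpoint=False)   # spacing 1/16; the next sample after x[-1] would be 1.0
spec = np.abs(np.fft.rfft(np.cos(2 * np.pi * 3 * x)))
print(spec.round(3))                           # zero everywhere except bin 3, which holds all the energy

# with endpoint=True the last sample repeats the first (cos(0) == cos(2*pi)),
# the period is no longer sampled evenly, and energy leaks across bins
x_bad = np.linspace(0, 1, num=N, endpoint=True)
spec_bad = np.abs(np.fft.rfft(np.cos(2 * np.pi * 3 * x_bad)))
print(np.count_nonzero(spec_bad.round(3)))     # several nonzero bins instead of one
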
Example 6
Project: StructEngPy   Author: zhuoju36   File: spectrum.py    MIT License
def __init__(self,alpha_max,Tg,xi):
        gamma=0.9+(0.05-xi)/(0.3+6*xi)
        eta1=0.02+(0.05-xi)/(4+32*xi)
        eta1=eta1 if eta1>0 else 0
        eta2=1+(0.05-xi)/(0.08+1.6*xi)
        eta2=eta2 if eta2>0.55 else 0.55
        T=np.linspace(0,6,601)
        alpha=[]
        for t in T:
            if t<0.1:
                alpha.append(np.interp(t,[0,0.1],[0.45*alpha_max,eta2*alpha_max]))
            elif t<Tg:
                alpha.append(eta2*alpha_max)
            elif t<5*Tg:
                alpha.append((Tg/t)**gamma*eta2*alpha_max)
            else:
                alpha.append((eta2*0.2**gamma-eta1*(t-5*Tg))*alpha_max)
        self.__spectrum={'T':T,'alpha':alpha} 
Example 7
Project: cplot   Author: sunchaoatmo   File: cscontour.py    GNU General Public License v3.0
def cshistplot(sample,alpha,xsample,ax,lw,label,color,shade,legend=True,hist=False,**kwargs):
  import numpy as np
  from scipy import stats
  if hist:
    hist,bin_edges=np.histogram(sample, bins='fd', range=None)
    x=(bin_edges[1:]+bin_edges[:-1])*.5
    y=hist/float(len(sample))*100.0
  else:
    x=np.linspace(xsample[0],xsample[-1],100)
    kernel=stats.gaussian_kde(sample, bw_method="scott")
    #kernel=stats.gaussian_kde(sample, bw_method="silverman")
    y=kernel(x)
    if max(y)<1:
      y=y*100
  ax.plot(x, y, color=color, label=label,lw=lw, **kwargs)
  if shade:
    ax.fill_between(x, 1e-12, y, facecolor=color, alpha=alpha)

  # Draw the legend here
  ax.legend(loc="best") 
Example 8
Project: OpenFermion-Cirq   Author: quantumlib   File: fermionic_simulation_test.py    Apache License 2.0
def test_weights_and_exponent(weights):
    exponents = np.linspace(-1, 1, 8)
    gates = tuple(
        ofc.QuarticFermionicSimulationGate(weights / exponent,
                                         exponent=exponent)
        for exponent in exponents)

    for g1 in gates:
        for g2 in gates:
            assert cirq.approx_eq(g1, g2, atol=1e-100)

    for i, (gate, exponent) in enumerate(zip(gates, exponents)):
        assert gate.exponent == 1
        new_exponent = exponents[-i]
        new_gate = gate._with_exponent(new_exponent)
        assert new_gate.exponent == new_exponent 
Example 9
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_fix2d():
    x = np.random.randn(5000)
    y = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.2, 0.5, 5000)

    pygram_h, __ = pygram11.fix2d(x, y, bins=bins, range=((-3, 3), (-2, 2)))
    numpy_h, __, __ = np.histogram2d(
        x, y, bins=[np.linspace(-3, 3, 26), np.linspace(-2, 2, 26)]
    )
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.fix2d(
        x, y, bins=(25, 27), range=((-3, 3), (-2, 1)), weights=w
    )
    numpy_h, __, __ = np.histogram2d(
        x, y, bins=[np.linspace(-3, 3, 26), np.linspace(-2, 1, 28)], weights=w
    )
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
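
These comparisons rest on the relationship between a fixed bin count and explicit edges: n bins over (lo, hi) correspond to the n + 1 edges np.linspace(lo, hi, n + 1), which is why 25 bins are matched with np.linspace(-3, 3, 26). A small sanity check in plain NumPy, independent of pygram11:

import numpy as np

x = np.random.randn(1000)

h_count, edges_count = np.histogram(x, bins=25, range=(-3, 3))
h_edges, edges_explicit = np.histogram(x, bins=np.linspace(-3, 3, 26))

assert np.array_equal(h_count, h_edges)          # identical counts
assert np.allclose(edges_count, edges_explicit)  # identical 26 edges
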
Example 10
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_numpyAPI_fix2d():
    x = np.random.randn(5000)
    y = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.2, 0.5, 5000)

    pygram_h, __ = pygram11.histogram2d(x, y, bins=bins, range=((-3, 3), (-2, 2)))
    numpy_h, __, __ = np.histogram2d(x, y, bins=bins, range=((-3, 3), (-2, 2)))
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.histogram2d(
        x, y, bins=(25, 27), range=((-3, 3), (-2, 1)), weights=w
    )
    numpy_h, __, __ = np.histogram2d(
        x, y, bins=[np.linspace(-3, 3, 26), np.linspace(-2, 1, 28)], weights=w
    )
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 11
Project: nn_framework   Author: brohrer   File: autoencoder_viz.py    MIT License
def plot_connection(self, ax_boss, x0, x1, y0, y1):
        """
        Represent the weights connecting nodes in one layer
        to nodes in the next.
        """
        weight = np.random.sample() * 2 - 1
        x = np.linspace(x0, x1, num=50)
        y = y0 + (y1 - y0) * (
            -np.cos(
                np.pi * (x - x0) / (x1 - x0)
            ) + 1) / 2
        if weight > 0:
            conn_color = self.tan
        else:
            conn_color = self.blue
        ax_boss.plot(x, y, color=conn_color, linewidth=weight) 
Example 12
Project: ros_dmp   Author: abhishek098   File: dmp_discrete.py    Apache License 2.0
def gen_centers(self):
        """Set the centre of the Gaussian basis
        functions be spaced evenly throughout run time"""

        '''x_track = self.cs.discrete_rollout()
        t = np.arange(len(x_track))*self.dt
        # choose the points in time we'd like centers to be at
        c_des = np.linspace(0, self.cs.run_time, self.n_bfs)
        self.c = np.zeros(len(c_des))
        for ii, point in enumerate(c_des):
            diff = abs(t - point)
            self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]'''

        # desired activations throughout time
        des_c = np.linspace(0, self.cs.run_time, self.n_bfs)

        self.c = np.ones(len(des_c))
        for n in range(len(des_c)):
            # finding x for desired times t
            self.c[n] = np.exp(-self.cs.ax * des_c[n]) 
Example 13
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License
def plotLearningCurves(train, classifier):
    #P.show()
    X = train.values[:, 1::]
    y = train.values[:, 0]

    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.legend(loc="best")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show() 
Example 14
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_data_1d(request):
    """Create one dimensional test DataArray."""
    Nx = 16
    Lx = 1.0
    x = np.linspace(0, Lx, Nx)
    dx = x[1] - x[0]
    coords = None if request.param == 'nocoords' else [x]
    da = xr.DataArray(np.random.rand(Nx), coords=coords, dims=['x'])
    if request.param == 'dask':
        da = da.chunk()
    return da 
Example 15
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_cross_phase_1d(self, dask):
        N = 32
        x = np.linspace(0, 1, num=N, endpoint=False)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1, name='a', dims=['x'], coords={'x': x})
        da2 = xr.DataArray(data=signal2, name='b', dims=['x'], coords={'x': x})

        if dask:
            da1 = da1.chunk({'x': 32})
            da2 = da2.chunk({'x': 32})
        cp = xrft.cross_phase(da1, da2, dim=['x'])

        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset)
        assert cp.name == 'a_b_phase'

        xrt.assert_equal(xrft.cross_phase(da1, da2), cp)

        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2.isel(x=0).drop('x'))

        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2.rename({'x':'y'})) 
Example 16
Project: wikilinks   Author: trovdimi   File: click_distributions.py    MIT License
def plot_counts_frequency():

    fig = plt.figure()
    ax = fig.add_subplot(111)


    category_distributions = read_pickle(HOME+'output/category_counts_distribution.obj')
    data = category_distributions['counts']
    data = [int(x[0]) for x in data]
    #to consider the edges that have zero transitions we subtract the number of transitions from the number of edges in Wikipedia
    #number_of_edges = 339463340
    #zeros = np.zeros((number_of_edges - len(data)))
    #data = np.append(zeros, data)
    #bins = [0,11]
    #bins.extend(np.linspace(100,10000))
    #data = data.extend(listofzeros)
    #print data
    hist, bin_edges = np.histogram(data, bins=10000)
    #print len(hist)
    #print len(bin_edges)
    print(hist, bin_edges)

    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.plot(bin_edges[:-1], hist, marker='o', markersize=3, markeredgecolor='none', color='#D65F5F')

    #ax.set_ylim([10**0, 10**6])
    #ax.set_xlim([10**0, 10**6])
    ax.set_xlabel('Number of transitions')
    ax.set_ylabel('Frequency')

    fig.tight_layout()
    fig.savefig( 'output/agg_counts_distributions.pdf', bbox_inches='tight') 
Example 17
Project: OpenAPS   Author: medicinexlab   File: bglomb.py    MIT License
def _get_lomb_scargle(bg_df, start_index, end_index, plot_lomb_array):
        bg_time_array, bg_value_array, bg_gap_start_time, bg_gap_end_time = _make_data_array(bg_df, start_index, end_index, 'bg')
        iob_time_array, iob_value_array, iob_gap_start_time, iob_gap_end_time = _make_data_array(bg_df, start_index, end_index, 'IOB')
        cob_time_array, cob_value_array, cob_gap_start_time, cob_gap_end_time = _make_data_array(bg_df, start_index, end_index, 'COB')

        #Keep track of the data start and end times in the array
        data_gap_start_time = bg_gap_start_time + iob_gap_start_time + cob_gap_start_time
        data_gap_end_time = bg_gap_end_time + iob_gap_end_time + cob_gap_end_time

        period = np.linspace(0, int(bg_time_array.max()), int(bg_time_array.max()) + 1) #set period to be as large as possible

        bg_lomb = _run_lomb_scargle(bg_time_array, bg_value_array, period)
        iob_lomb = _run_lomb_scargle(iob_time_array, iob_value_array, period)
        cob_lomb = _run_lomb_scargle(cob_time_array, cob_value_array, period)

        #Set all bg/cob values below zero equal to zero (iob can be negative if it is below baseline levels)
        bg_lomb[bg_lomb < 0] = 0
        cob_lomb[cob_lomb < 0] = 0

        #Plot lomb-scargle if values in the plot_lomb_array
        if len(plot_lomb_array) > 0:
            plt.clf()
            if "bg" in plot_lomb_array: _plot_lomb(period, bg_lomb, bg_time_array, bg_value_array, "BG")
            if "iob" in plot_lomb_array: _plot_lomb(period, iob_lomb, iob_time_array, iob_value_array, "IOB")
            if "cob" in plot_lomb_array: _plot_lomb(period, cob_lomb, cob_time_array, cob_value_array, "COB")
            plt.legend(loc='upper left')
            plt.show()

        return period, bg_lomb, iob_lomb, cob_lomb, data_gap_start_time, data_gap_end_time


#Class to store the lomb data 
Example 18
Project: FRIDA   Author: LCAV   File: generators.py    MIT License
def gen_sig_at_mic(sigmak2_k, phi_k, pos_mic_x,
                   pos_mic_y, omega_band, sound_speed,
                   SNR, Ns=256):
    """
    generate complex base-band signal received at microphones
    :param sigmak2_k: the variance of the circulant complex Gaussian signal
                emitted by the K sources
    :param phi_k: source locations (azimuths)
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param SNR: SNR for the received signal at microphones
    :param Ns: number of snapshots used to estimate the covariance matrix
    :return: y_mic: received (complex) signal at microphones
    """
    num_mic = pos_mic_x.size
    xk, yk = polar2cart(1, phi_k)  # source locations in cartesian coordinates
    # reshape to use broadcasting
    xk = np.reshape(xk, (1, -1), order='F')
    yk = np.reshape(yk, (1, -1), order='F')
    pos_mic_x = np.reshape(pos_mic_x, (-1, 1), order='F')
    pos_mic_y = np.reshape(pos_mic_y, (-1, 1), order='F')

    t = np.reshape(np.linspace(0, 10 * np.pi, num=Ns), (1, -1), order='F')
    K = sigmak2_k.size
    sigmak2_k = np.reshape(sigmak2_k, (-1, 1), order='F')

    # x_tilde_k size: K x length_of_t
    # circular complex Gaussian process
    x_tilde_k = np.sqrt(sigmak2_k / 2.) * (np.random.randn(K, Ns) + 1j *
                                           np.random.randn(K, Ns))
    y_mic = np.dot(np.exp(-1j * (xk * pos_mic_x + yk * pos_mic_y) / (sound_speed / omega_band)),
                   x_tilde_k * np.exp(1j * omega_band * t))
    signal_energy = linalg.norm(y_mic, 'fro') ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / (Ns * num_mic)
    noise = np.sqrt(sigma2_noise / 2.) * (np.random.randn(*y_mic.shape) + 1j *
                                          np.random.randn(*y_mic.shape))
    y_mic_noisy = y_mic + noise
    return y_mic_noisy, y_mic 
Example 19
Project: fbpconv_tf   Author: panakino   File: layers.py    GNU General Public License v3.0
def _load_whole_data(current_version, file, dump_folder):
    hashstr = hashlib.sha256((
        "".join(file)+current_version
        ).encode()).hexdigest()
    dump_file=os.path.join(dump_folder,hashstr+".h5")
    print(dump_file)
    rebuild_data=True
    if os.path.exists(dump_file):
        print("dump file existed")
        with h5py.File(dump_file,"r") as h5file:
           if "version" in list(h5file.keys()):
               if h5file["version"].value==current_version:
                       rebuild_data=False

    print("rebuild_data",rebuild_data)
    if rebuild_data:
        data=[]
        mat_contents=sio.loadmat(file)
        gt=np.squeeze(mat_contents['data_gt'])
        sparse=np.zeros_like(gt)
        full=np.zeros_like(gt)

        for ind in range(gt.shape[2]):
            img = np.squeeze(gt[:,:,ind])
            theta = np.linspace(0., 180., 1000, endpoint=False)
            sinogram = radon(img, theta=theta, circle=False)
            theta_down = theta[0:1000:20]
            sparse[:,:,ind] =iradon(sinogram[:,0:1000:20],theta=theta_down,circle=False)
            full[:,:,ind] =iradon(sinogram,theta=theta,circle=False)
            print("iteration : " , ind, "/", gt.shape[2])

        norm_val=np.amax(sparse)
        print("norm_val", norm_val)
        print("finished rebuild")
        saveh5({"label":full/norm_val*255.,"sparse":sparse/norm_val*255.,"version":current_version},dump_file)

    f_handle=h5py.File(dump_file,"r")
    label=np.array(f_handle["label"])
    sparse=np.array(f_handle["sparse"])
    print("size of label, " , label.shape)
    print("size of sparse, " , sparse.shape) 
Example 20
Project: RandomFourierFeatures   Author: tiskw   File: sample_rff_regression.py    MIT License
def main():

    ### Fix seed for random Fourier feature calculation
    pyrff.seed(111)

    ### Prepare training data
    Xs_train = np.linspace(0, 3, 21).reshape((21, 1))
    ys_train = np.sin(Xs_train**2)
    Xs_test  = np.linspace(0, 3, 101).reshape((101, 1))
    ys_test  = np.sin(Xs_test**2)

    ### Create classifier instance
    reg = pyrff.RFFRegression(dim_output = 8, std = 0.5)

    ### Train regression with random fourier features
    reg.fit(Xs_train, ys_train)

    ### Conduct prediction for the test data
    predict = reg.predict(Xs_test)

    ### Plot regression results
    mpl.figure(0)
    mpl.title("Regression for function y = sin(x^2) with RFF")
    mpl.xlabel("X")
    mpl.ylabel("Y")
    mpl.plot(Xs_train, ys_train, "o")
    mpl.plot(Xs_test,  ys_test,  ".")
    mpl.plot(Xs_test,  predict,  "-")
    mpl.legend(["Training data", "Test data", "Prediction by RFF regression"])
    mpl.grid()
    mpl.show() 
Example 21
Project: voice-recognition   Author: golabies   File: signal_fft.py    MIT License
def my_ft(self):
        mft = fft(self.signal)
        freq = np.linspace(-self.fs / 2, self.fs / 2, len(self.signal))
        mft = mft[freq >= 0]
        freq = freq[freq >= 0]
        mft = mft[::-1]
        mft[2:] = 2 * mft[2:]
        self.freq = freq
        self.out_put = abs(mft) / len(self.signal)
        return self.freq, self.out_put 
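
As a side note, the frequency axis built here with np.linspace is close to, but not identical to, the exact DFT bin frequencies, which np.fft.fftfreq returns. A small sketch of the difference, with an illustrative sample rate and length rather than values from the project:

import numpy as np

fs, n = 100.0, 8
approx = np.linspace(-fs / 2, fs / 2, n)                # spacing fs/(n-1), includes +fs/2
exact = np.fft.fftshift(np.fft.fftfreq(n, d=1.0 / fs))  # spacing fs/n, stops at +fs/2 - fs/n
print(approx)
print(exact)
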
Example 22
Project: cplot   Author: sunchaoatmo   File: taylorDiagram.py    GNU General Public License v3.0
def add_contours(self, levels=5, **kwargs):
        """Add constant centered RMS difference contours."""

        rs,ts = NP.meshgrid(NP.linspace(self.smin,self.smax),
                            NP.linspace(0,NP.pi/2.0))
        # Compute centered RMS difference
        rms = NP.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*NP.cos(ts))
        
        contours = self.ax.contour(ts, rs, rms, levels,linewidths=0.5,ls='--',zorder=0, **kwargs)
        #contours = self.ax.contour(ts, rs, rms, levels,linewidths=0.01, **kwargs)

        return contours 
Example 23
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0
def fit(self, data):
        r"""Perform fit given `data`.

        Parameters
        ----------
        data : array_like
            Test statistic values

        Returns
        -------
        delta_exp_frozen
            Probability density function

        """
        data = np.asarray(data)

        # Get amount of over-fluctuations.
        eta = float(np.count_nonzero(data > 0.)) / len(data)
        eta_err = np.sqrt(eta * (1. - eta) / len(data))

        # Sort data and construct cumulative distribution.
        x = np.sort(data[data > 0.])
        y = np.linspace(1., 0., len(x) + 1)[:-1]

        coeff = np.polyfit(x, np.log(y), self.deg)

        return delta_exp(coeff, eta, eta_err) 
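
The line y = np.linspace(1., 0., len(x) + 1)[:-1] assigns each sorted test-statistic value its empirical survival fraction, i.e. the fraction of entries at or above it. A tiny sketch with made-up numbers:

import numpy as np

data = np.array([0.3, 2.0, 0.9, 1.4])
x = np.sort(data)                          # [0.3 0.9 1.4 2. ]
y = np.linspace(1., 0., len(x) + 1)[:-1]   # [1.   0.75 0.5  0.25]
# e.g. 75% of the entries are >= 0.9 and 25% are >= 2.0
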
Example 24
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0
def skymap(plt, vals, **kwargs):
    fig, ax = plt.subplots(subplot_kw=dict(projection="aitoff"))

    gridsize = 1000

    x = np.linspace(np.pi, -np.pi, 2 * gridsize)
    y = np.linspace(np.pi, 0., gridsize)

    X, Y = np.meshgrid(x, y)

    r = hp.rotator.Rotator(rot=(-180., 0., 0.))

    YY, XX = r(Y.ravel(), X.ravel())

    pix = hp.ang2pix(hp.npix2nside(len(vals)), YY, XX)

    Z = np.reshape(vals[pix], X.shape)

    lon = x[::-1]
    lat = np.pi /2.  - y

    cb = kwargs.pop("colorbar", dict())
    cb.setdefault("orientation", "horizontal")
    cb.setdefault("fraction", 0.075)

    title = cb.pop("title", None)

    p = ax.pcolormesh(lon, lat, Z, **kwargs)

    cbar = fig.colorbar(p, **cb)

    cbar.solids.set_edgecolor("face")
    cbar.update_ticks()
    if title is not None:
        cbar.set_label(title)

    ax.xaxis.set_ticks([])

    return fig, ax 
Example 25
Project: deep-learning-note   Author: wdxtub   File: utils.py    MIT License
def train_opt(optimizer_fn, states, hyperparams, features, labels,
              batch_size=10, num_epochs=2):
    # Initialize the model
    net, loss = linreg, squared_loss

    w = torch.nn.Parameter(torch.tensor(np.random.normal(0, 0.01, size=(features.shape[1], 1)), dtype=torch.float32),
                           requires_grad=True)
    b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float32), requires_grad=True)

    def eval_loss():
        return loss(net(features, w, b), labels).mean().item()

    ls = [eval_loss()]
    data_iter = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)

    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            l = loss(net(X, w, b), y).mean()  # use the mean loss

            # zero the gradients
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()

            l.backward()
            optimizer_fn([w, b], states, hyperparams)  # update the model parameters
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())  # record the current training error every 100 samples
    # Print the result and plot
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()


# Unlike the original book, the first argument here is an optimizer function rather than the optimizer's name
# e.g.: optimizer_fn=torch.optim.SGD, optimizer_hyperparams={"lr": 0.05}
Example 26
Project: deep-learning-note   Author: wdxtub   File: utils.py    MIT License
def train_opt_pytorch(optimizer_fn, optimizer_hyperparams, features, labels,
                      batch_size=10, num_epochs=2):
    # Initialize the model
    net = nn.Sequential(
        nn.Linear(features.shape[-1], 1)
    )
    loss = nn.MSELoss()
    optimizer = optimizer_fn(net.parameters(), **optimizer_hyperparams)

    def eval_loss():
        return loss(net(features).view(-1), labels).item() / 2

    ls = [eval_loss()]
    data_iter = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)

    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            # divide by 2 to stay consistent with train_ch7, since squared_loss divides by 2
            l = loss(net(X).view(-1), y) / 2

            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    # Print the result and plot
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show() 
Example 27
Project: neural-pipeline   Author: toodef   File: tensorboard.py    MIT License
def update_losses(self, losses: {}) -> None:
        """
        Update monitor

        :param losses: losses values with keys 'train' and 'validation'
        """
        if self.__writer is None:
            return

        def on_loss(name: str, values: np.ndarray) -> None:
            self.__writer.add_scalars('loss', {name: np.mean(values)}, global_step=self.epoch_num)
            self.__writer.add_histogram('{}/loss_hist'.format(name), np.clip(values, -1, 1).astype(np.float32),
                                        global_step=self.epoch_num, bins=np.linspace(-1, 1, num=11).astype(np.float32))

        self._iterate_by_losses(losses, on_loss) 
Example 28
Project: Scene-Understanding   Author: foamliu   File: class_rebal.py    MIT License
def smooth_class_prior(sigma=5, do_plot=False):
    prior_prob = np.load(os.path.join(data_dir, "prior_prob.npy"))
    # add an epsilon to prior prob to avoid 0 values and possible NaN
    prior_prob += 1E-3 * np.min(prior_prob)
    # renormalize
    prior_prob = prior_prob / (1.0 * np.sum(prior_prob))

    # Smooth with gaussian
    f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)
    xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)
    yy = f(xx)
    window = gaussian(2000, sigma)  # 2000 pts in the window, sigma=5
    smoothed = convolve(yy, window / window.sum(), mode='same')
    fout = interp1d(xx, smoothed)
    prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])
    prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)

    # Save
    file_name = os.path.join(data_dir, "prior_prob_smoothed.npy")
    np.save(file_name, prior_prob_smoothed)

    if do_plot:
        plt.plot(prior_prob)
        plt.plot(prior_prob_smoothed, "g--")
        plt.plot(xx, smoothed, "r-")
        plt.yscale("log")
        plt.show() 
Example 29
Project: synthetic-data-tutorial   Author: theodi   File: SocialSecurityNumberAttribute.py    MIT License
def generate_values_as_candidate_key(self, n):
        if n < 1e9:
            values = np.linspace(0, 1e9 - 1, num=n, dtype=int)
            values = np.random.permutation(values)
            values = [str(i).zfill(9) for i in values]
            return ['{}-{}-{}'.format(i[:3], i[3:5], i[5:]) for i in values]
        else:
        raise Exception('The candidate key "{}" cannot generate more than 1e9 distinct values.'.format(self.name))
Example 30
Project: spacesense   Author: spacesense-ai   File: classification.py    GNU Lesser General Public License v3.0
def build_model(self, X_train, n=10):
        nu = np.linspace(start=1e-5, stop=1e-2, num=n)
        gamma = np.linspace(start=1e-6, stop=1e-3, num=n)
        opt_diff = 1.0
        opt_nu = None
        opt_gamma = None
        nu_opt, gamma_opt = optimize_OneClassSVM(X_train, n)
        self.svc_models = svm.OneClassSVM(nu=nu_opt, kernel='rbf', gamma=gamma_opt) 
Example 31
Project: helloworld   Author: pip-uninstaller-python   File: matplotlibTest.py    GNU General Public License v2.0
def main():
    # line
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    c, s = np.cos(x), np.sin(x)
    plt.figure(1)
    plt.plot(x, c, color="blue", linewidth=1.0, linestyle="-", label="COS", alpha=0.5)  # 自变量, 因变量
    plt.plot(x, s, "r.", label="SIN")  # 正弦  "-"/"r-"/"r."
    plt.title("COS & SIN")
    ax = plt.gca()
    ax.spines["right"].set_color("none")
    ax.spines["top"].set_color("none")
    ax.spines["left"].set_position(("data", 0))  # 横轴位置
    ax.spines["bottom"].set_position(("data", 0))  # 纵轴位置
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    plt.xticks([-np.pi, -np.pi / 2.0, 0, np.pi / 2, np.pi],
               [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True))
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(16)
        label.set_bbox(dict(facecolor="white", edgecolor="None", alpha=0.2))
    plt.legend(loc="upper left")  # 左上角的显示图标
    plt.grid()  # 网格线
    # plt.axis([-1, 1, -0.5, 1])  # 显示范围
    plt.fill_between(x, np.abs(x) < 0.5, c, c < 0.5, color="green", alpha=0.25)
    t = 1
    plt.plot([t, t], [0, np.cos(t)], "y", linewidth=3, linestyle="--")
    # annotation
    plt.annotate("cos(1)", xy=(t, np.cos(1)), xycoords="data", xytext=(+10, +30),
                 textcoords="offset points", arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.2"))
    plt.show()


# Scatter --> scatter plot
Example 32
Project: numpynet   Author: uptake   File: examples.py    BSD 3-Clause "New" or "Revised" License
def plot_activations():
    for activation in common.Activation.available:
        x = np.linspace(-10.0, 10.0, 100)
        y = common.Activation(activation).function(x, deriv=False)
        dy = common.Activation(activation).function(x, deriv=True)
        viz_client.plot_func(x, y, title=activation)
        viz_client.plot_func(x, dy, title="d_" + activation)


# TODO write this! 
Example 33
Project: gullikson-scripts   Author: kgullikson88   File: SpectralTypeRelations.py    MIT License
def get_color(self, fv, temperature, search_range='valid'):
        """
        Get the color, given the temperature (root-finding)
        :param fv: The FitVals object to use. Should be one of the self.color_relations
        :param temperature: The temperature for which you want a color
        :param search_range: The range of colors to search. The default is the full valid range of the fit.
                             You can extend it if you want by giving a list-like object, but it will give
                             you a warning if the best fit is an extrapolation.
        :return: The color corresponding to the requested temperature
        """
        from kglib.utils import HelperFunctions
        # Determine the test values from search_range
        if isinstance(search_range, str) and search_range.lower() == 'valid':
            test_values = np.linspace(fv.valid[0], fv.valid[1], 1000)
        else:
            test_values = np.linspace(search_range[0], search_range[1], 1000)

        # Evaluate the function at each of the test colors
        test_temperatures = self.evaluate(fv, test_values, is_spt=False)

        # Determine the 'best fit' solution
        temperature = np.array(temperature)
        differences = (temperature.reshape(1, -1) - test_temperatures.reshape(-1, 1))
        idx = np.abs(differences).argmin(axis=0)
        color = test_values[idx]

        # Check if the best-fit solution is an extrapolation
        if HelperFunctions.IsListlike(search_range):
            if HelperFunctions.IsListlike(color):
                if not all([fv.valid[0] < c < fv.valid[1] for c in color]):
                    logging.warn('Best-fit color is an extrapolation from the valid range. Be very careful!')
            elif not fv.valid[0] < color < fv.valid[1]:
                logging.warn('Best-fit color is an extrapolation from the valid range. Be very careful!')
        return color 
Example 34
Project: ANN   Author: waynezv   File: ANN_large_v23.py    MIT License
def add_dist_prior(input):
    num_lines = 128
    d_theta = 0.7/180*np.pi
    i_start = 63
    theta_s = -num_lines/2*d_theta + (i_start-1)*d_theta
    theta_e = theta_s + 67*d_theta
    theta = np.arange(theta_s, theta_e, d_theta)
    x = np.linspace(-50,50,32)/1000
    z = np.linspace(20,80,32)/1000
    x = np.repeat(x.reshape(1,32),32,0)
    z = np.repeat(z.reshape(32,1),32,1)
    fc = 5e6
    c = 1540
    l = c/fc
    ele_width = l/2
    kerf = 0.0025/1000
    pitch = ele_width+kerf
    tx_pos_x = np.linspace(-pitch/2-31*pitch, pitch/2+31*pitch, 64)
    tx_pos_z = np.zeros((64,))
    dist = np.matrix(np.zeros((64,1024)))
    for i in range(64):
        dist[i,:] =np.sqrt( \
                (tx_pos_x[i]-x)**2 + \
                (tx_pos_z[i]-z)**2 \
                ).reshape(-1)
    ns, nch, nr, nc = input.shape
    input_new = np.zeros((ns,nch,nr,nc))
    for nsi in range(ns):
        for nchi in range(nch):
            tmp = np.matrix(input[nsi,nchi,:,:])
            input_new[nsi,nchi,:,:] = \
                   tmp*dist*dist.T
    return input_new 
Example 35
Project: ANN   Author: waynezv   File: ANN_large_v22.py    MIT License
def add_dist_prior(input):
    num_lines = 128
    d_theta = 0.7/180*np.pi
    i_start = 63
    theta_s = -num_lines/2*d_theta + (i_start-1)*d_theta
    theta_e = theta_s + 67*d_theta
    theta = np.arange(theta_s, theta_e, d_theta)
    x = np.linspace(-50,50,32)/1000
    z = np.linspace(20,80,32)/1000
    x = np.repeat(x.reshape(1,32),32,0)
    z = np.repeat(z.reshape(32,1),32,1)
    fc = 5e6
    c = 1540
    l = c/fc
    ele_width = l/2
    kerf = 0.0025/1000
    pitch = ele_width+kerf
    tx_pos_x = np.linspace(-pitch/2-31*pitch, pitch/2+31*pitch, 64)
    tx_pos_z = np.zeros((64,))
    dist = np.matrix(np.zeros((64,1024)))
    for i in range(64):
        dist[i,:] =np.sqrt( \
                (tx_pos_x[i]-x)**2 + \
                (tx_pos_z[i]-z)**2 \
                ).reshape(-1)
    ns, nch, nr, nc = input.shape
    input_new = np.zeros((ns,nch,nr,nc))
    for nsi in range(ns):
        for nchi in range(nch):
            tmp = np.matrix(input[nsi,nchi,:,:])
            input_new[nsi,nchi,:,:] = \
                   tmp*dist*dist.T
    return input_new 
Example 36
Project: ANN   Author: waynezv   File: ANN_large_v24.py    MIT License
def add_dist_prior(input):
    num_lines = 128
    d_theta = 0.7/180*np.pi
    i_start = 63
    theta_s = -num_lines/2*d_theta + (i_start-1)*d_theta
    theta_e = theta_s + 67*d_theta
    theta = np.arange(theta_s, theta_e, d_theta)
    x = np.linspace(-50,50,32)/1000
    z = np.linspace(20,80,32)/1000
    x = np.repeat(x.reshape(1,32),32,0)
    z = np.repeat(z.reshape(32,1),32,1)
    fc = 5e6
    c = 1540
    l = c/fc
    ele_width = l/2
    kerf = 0.0025/1000
    pitch = ele_width+kerf
    tx_pos_x = np.linspace(-pitch/2-31*pitch, pitch/2+31*pitch, 64)
    tx_pos_z = np.zeros((64,))
    dist = np.matrix(np.zeros((64,1024)))
    for i in range(64):
        dist[i,:] =np.sqrt( \
                (tx_pos_x[i]-x)**2 + \
                (tx_pos_z[i]-z)**2 \
                ).reshape(-1)
    ns, nch, nr, nc = input.shape
    input_new = np.zeros((ns,nch,nr,nc))
    for nsi in range(ns):
        for nchi in range(nch):
            tmp = np.matrix(input[nsi,nchi,:,:])
            input_new[nsi,nchi,:,:] = \
                   tmp*dist*dist.T
    return input_new 
Example 37
Project: featkit   Author: ryadzenine   File: numerical.py    MIT License
def fit(self, X, y=None):
        self.pc = np.percentile(X, np.linspace(100 / self.nb_bins, 100 - 100 / self.nb_bins, self.nb_bins))
        return self 
Example 38
Project: kitti-object-eval-python   Author: traveller59   File: eval.py    MIT License
def do_coco_style_eval(gt_annos,
                       dt_annos,
                       current_classes,
                       overlap_ranges,
                       compute_aos,
                       z_axis=1,
                       z_center=1.0):
    # overlap_ranges: [range, metric, num_class]
    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
    for i in range(overlap_ranges.shape[1]):
        for j in range(overlap_ranges.shape[2]):
            min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval_v2(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        z_axis=z_axis,
        z_center=z_center)
    # ret: [num_class, num_diff, num_minoverlap]
    mAP_bbox = mAP_bbox.mean(-1)
    mAP_bev = mAP_bev.mean(-1)
    mAP_3d = mAP_3d.mean(-1)
    if mAP_aos is not None:
        mAP_aos = mAP_aos.mean(-1)
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos 
Example 39
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_fix1d():
    x = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.5, 1.0, 5000)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3))
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26))
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), weights=w)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w)
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 40
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_numpyAPI_fix1d():
    x = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.8, 1, 5000)

    pygram_h, __ = pygram11.histogram(x, bins=25, range=(-3, 3))
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26))
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.histogram(x, bins=25, range=(-3, 3), weights=w)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w)
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 41
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_fix1d_omp():
        x = np.random.randn(5000)
        bins = 25
        w = np.random.uniform(-0.2, 0.8, 5000)

        pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), omp=True)
        numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26))
        npt.assert_almost_equal(pygram_h, numpy_h, 5)

        pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), weights=w, omp=True)
        numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w)
        npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 42
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_density_fix1d():
    x = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.5, 1.0, 5000)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), density=True)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), density=True)
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), weights=w, density=True)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w, density=True)
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 43
Project: HAPI   Author: MAfarrag   File: DistParameters.py    MIT License
def calculateK(x,position,UB,LB):
    """
    ===================================================
        calculateK(x,position,UB,LB):
    ===================================================
        
    this function takes the value of the x parameter and generates 101 evenly spaced
    candidate values of the k parameter between the upper & lower constraints, then
    returns the value corresponding to the given position

    Inputs:
    ----------
        1- x: weighting coefficient that determines the linearity of the water surface
            (one of the parameters of the Muskingum routing method)
        2- position:
            random position between the upper and lower bounds of the k parameter
        3- UB:
            upper bound for the k parameter
        4- LB:
            lower bound for the k parameter
    """
    
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 44
Project: HAPI   Author: MAfarrag   File: DHBV_functions0000000000000.py    MIT License
def calculateK(x,position,UB,LB):
    '''
    calculateK(x,position,UB,LB):
        this function takes the value of the x parameter and generates 101 evenly spaced
        candidate values of the k parameter between the upper & lower constraints, then
        returns the value corresponding to the given position

        Inputs:
            1- x: weighting coefficient that determines the linearity of the water surface
                (one of the parameters of the Muskingum routing method)
            2- position:
                random position between the upper and lower bounds of the k parameter
            3- UB:
                upper bound for the k parameter
            4- LB:
                lower bound for the k parameter
    '''
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 45
Project: HAPI   Author: MAfarrag   File: trial2.py    MIT License
def update_data(attrname, old, new):

    # Get the current slider values
    a = amplitude.value
    b = offset.value
    w = phase.value
    k = freq.value

    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = a*np.sin(k*x + w) + b

    source.data = dict(x=x, y=y) 
Example 46
Project: HAPI   Author: MAfarrag   File: DistParameters.py    MIT License
def calculateK(x,position,UB,LB):
    """
    ===================================================
        calculateK(x,position,UB,LB):
    ===================================================
        
    this function takes the value of the x parameter and generates 101 evenly spaced
    candidate values of the k parameter between the upper & lower constraints, then
    returns the value corresponding to the given position

    Inputs:
    ----------
        1- x: weighting coefficient that determines the linearity of the water surface
            (one of the parameters of the Muskingum routing method)
        2- position:
            random position between the upper and lower bounds of the k parameter
        3- UB:
            upper bound for the k parameter
        4- LB:
            lower bound for the k parameter
    """
    
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 47
Project: HAPI   Author: MAfarrag   File: DistParameters.py    MIT License
def calculateK(x,position,UB,LB):
    """
    ===================================================
        calculateK(x,position,UB,LB):
    ===================================================
        
    this function takes the value of the x parameter and generates 101 evenly spaced
    candidate values of the k parameter between the upper & lower constraints, then
    returns the value corresponding to the given position

    Inputs:
    ----------
        1- x: weighting coefficient that determines the linearity of the water surface
            (one of the parameters of the Muskingum routing method)
        2- position:
            random position between the upper and lower bounds of the k parameter
        3- UB:
            upper bound for the k parameter
        4- LB:
            lower bound for the k parameter
    """
    
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 48
Project: praktipy   Author: The-Ludwig   File: __init__.py    MIT License
def polyplotfit(x, params, N=1000, border=0.05):
    """Plots a polynome, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""

    dx = x[-1] - x[0]
    x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = np.zeros(len(x_fit))
    deg = len(params)
    for i in range(deg):
        y_fit += params[deg-1-i] * x_fit**i

    return (x_fit, y_fit) 
Example 49
Project: praktipy   Author: The-Ludwig   File: __init__.py    MIT License
def curveplotfit(f, x, params, N=1000, border=0.05, logscale=False):
    """Plots a general function, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""
    dx = x[-1] - x[0]
    if logscale:
        x_fit = np.logspace(np.log10(
            x[0]) - np.log10(x[0])*border*0.6, np.log10(x[-1])+np.log10(x[-1])*border*0.2, N)
    else:
        x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = f(x_fit, *params)

    return (x_fit, y_fit) 
Example 50
Project: praktipy   Author: The-Ludwig   File: praktiplot.py    MIT License
def polyplotfit(x, params, N=1000, border=0.05):
    """Plots a polynome, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""

    dx = x[-1] - x[0]
    x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = np.zeros(len(x_fit))
    deg = len(params)
    for i in range(deg):
        y_fit += params[deg-1-i] * x_fit**i

    return (x_fit, y_fit)
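
A possible usage sketch for polyplotfit, assuming the function above is in scope and matplotlib is installed; the data points below are made up:

import numpy as np
import matplotlib.pyplot as plt

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = 2.0 * x**2 - x + 1.0
params = np.polyfit(x, y, deg=2)       # highest-order coefficient first, matching polyplotfit

x_fit, y_fit = polyplotfit(x, params)  # smooth curve with a 5% border on each side
plt.plot(x, y, "o")
plt.plot(x_fit, y_fit, "-")
plt.show()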